-rw-r--r--  Documentation/DocBook/genericirq.tmpl  84
-rw-r--r--  MAINTAINERS  6
-rw-r--r--  arch/arm/kernel/irq.c  6
-rw-r--r--  arch/arm/mach-bcmring/irq.c  6
-rw-r--r--  arch/m32r/kernel/irq.c  2
-rw-r--r--  arch/m32r/platforms/m32104ut/setup.c  2
-rw-r--r--  arch/m32r/platforms/m32700ut/setup.c  8
-rw-r--r--  arch/m32r/platforms/mappi/setup.c  2
-rw-r--r--  arch/m32r/platforms/mappi2/setup.c  2
-rw-r--r--  arch/m32r/platforms/mappi3/setup.c  2
-rw-r--r--  arch/m32r/platforms/oaks32r/setup.c  2
-rw-r--r--  arch/m32r/platforms/opsput/setup.c  6
-rw-r--r--  arch/m32r/platforms/usrv/setup.c  4
-rw-r--r--  arch/sh/kernel/irq.c  2
-rw-r--r--  arch/tile/kernel/irq.c  4
-rw-r--r--  arch/um/kernel/irq.c  6
-rw-r--r--  arch/x86/Kconfig  36
-rw-r--r--  arch/x86/include/asm/apb_timer.h  1
-rw-r--r--  arch/x86/include/asm/cpu.h  1
-rw-r--r--  arch/x86/include/asm/irq_remapping.h  27
-rw-r--r--  arch/x86/kernel/apb_timer.c  2
-rw-r--r--  arch/x86/kernel/apic/apic.c  3
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  39
-rw-r--r--  arch/x86/kernel/apic/probe_64.c  3
-rw-r--r--  arch/x86/kernel/cpu/amd.c  2
-rw-r--r--  arch/x86/kernel/cpu/common.c  2
-rw-r--r--  arch/x86/kernel/cpu/intel.c  2
-rw-r--r--  arch/x86/kernel/early-quirks.c  2
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c  4
-rw-r--r--  arch/x86/kernel/reboot.c  2
-rw-r--r--  arch/x86/kernel/setup.c  1
-rw-r--r--  arch/x86/kernel/setup_percpu.c  2
-rw-r--r--  arch/x86/kernel/smpboot.c  4
-rw-r--r--  arch/x86/kvm/lapic.c  3
-rw-r--r--  arch/x86/mm/init_32.c  4
-rw-r--r--  arch/x86/mm/init_64.c  2
-rw-r--r--  arch/x86/mm/k8topology_64.c  6
-rw-r--r--  arch/xtensa/kernel/irq.c  2
-rw-r--r--  drivers/isdn/act2000/act2000.h  6
-rw-r--r--  drivers/isdn/hisax/config.c  18
-rw-r--r--  drivers/isdn/hisax/hisax.h  1
-rw-r--r--  drivers/mfd/twl4030-irq.c  4
-rw-r--r--  include/linux/irq.h  451
-rw-r--r--  include/linux/irqdesc.h  171
-rw-r--r--  include/linux/irqnr.h  5
-rw-r--r--  include/linux/lockdep.h  8
-rw-r--r--  init/Kconfig  2
-rw-r--r--  init/main.c  1
-rw-r--r--  kernel/irq/Kconfig  58
-rw-r--r--  kernel/irq/Makefile  2
-rw-r--r--  kernel/irq/autoprobe.c  15
-rw-r--r--  kernel/irq/chip.c  300
-rw-r--r--  kernel/irq/dummychip.c  68
-rw-r--r--  kernel/irq/handle.c  341
-rw-r--r--  kernel/irq/internals.h  130
-rw-r--r--  kernel/irq/irqdesc.c  424
-rw-r--r--  kernel/irq/manage.c  87
-rw-r--r--  kernel/irq/migration.c  12
-rw-r--r--  kernel/irq/numa_migrate.c  8
-rw-r--r--  kernel/irq/proc.c  26
-rw-r--r--  kernel/irq/resend.c  5
-rw-r--r--  kernel/irq/spurious.c  8
-rw-r--r--  kernel/softirq.c  4
63 files changed, 1510 insertions, 939 deletions
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 1448b33fd222..fb10fd08c05c 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -28,7 +28,7 @@
28 </authorgroup> 28 </authorgroup>
29 29
30 <copyright> 30 <copyright>
31 <year>2005-2006</year> 31 <year>2005-2010</year>
32 <holder>Thomas Gleixner</holder> 32 <holder>Thomas Gleixner</holder>
33 </copyright> 33 </copyright>
34 <copyright> 34 <copyright>
@@ -100,6 +100,10 @@
100 <listitem><para>Edge type</para></listitem> 100 <listitem><para>Edge type</para></listitem>
101 <listitem><para>Simple type</para></listitem> 101 <listitem><para>Simple type</para></listitem>
102 </itemizedlist> 102 </itemizedlist>
103 During the implementation we identified another type:
104 <itemizedlist>
105 <listitem><para>Fast EOI type</para></listitem>
106 </itemizedlist>
103 In the SMP world of the __do_IRQ() super-handler another type 107 In the SMP world of the __do_IRQ() super-handler another type
104 was identified: 108 was identified:
105 <itemizedlist> 109 <itemizedlist>
@@ -153,6 +157,7 @@
153 is still available. This leads to a kind of duality for the time 157 is still available. This leads to a kind of duality for the time
154 being. Over time the new model should be used in more and more 158 being. Over time the new model should be used in more and more
155 architectures, as it enables smaller and cleaner IRQ subsystems. 159 architectures, as it enables smaller and cleaner IRQ subsystems.
160 It's deprecated for three years now and about to be removed.
156 </para> 161 </para>
157 </chapter> 162 </chapter>
158 <chapter id="bugs"> 163 <chapter id="bugs">
@@ -217,6 +222,7 @@
217 <itemizedlist> 222 <itemizedlist>
218 <listitem><para>handle_level_irq</para></listitem> 223 <listitem><para>handle_level_irq</para></listitem>
219 <listitem><para>handle_edge_irq</para></listitem> 224 <listitem><para>handle_edge_irq</para></listitem>
225 <listitem><para>handle_fasteoi_irq</para></listitem>
220 <listitem><para>handle_simple_irq</para></listitem> 226 <listitem><para>handle_simple_irq</para></listitem>
221 <listitem><para>handle_percpu_irq</para></listitem> 227 <listitem><para>handle_percpu_irq</para></listitem>
222 </itemizedlist> 228 </itemizedlist>
@@ -233,33 +239,33 @@
233 are used by the default flow implementations. 239 are used by the default flow implementations.
234 The following helper functions are implemented (simplified excerpt): 240 The following helper functions are implemented (simplified excerpt):
235 <programlisting> 241 <programlisting>
236default_enable(irq) 242default_enable(struct irq_data *data)
237{ 243{
238 desc->chip->unmask(irq); 244 desc->chip->irq_unmask(data);
239} 245}
240 246
241default_disable(irq) 247default_disable(struct irq_data *data)
242{ 248{
243 if (!delay_disable(irq)) 249 if (!delay_disable(data))
244 desc->chip->mask(irq); 250 desc->chip->irq_mask(data);
245} 251}
246 252
247default_ack(irq) 253default_ack(struct irq_data *data)
248{ 254{
249 chip->ack(irq); 255 chip->irq_ack(data);
250} 256}
251 257
252default_mask_ack(irq) 258default_mask_ack(struct irq_data *data)
253{ 259{
254 if (chip->mask_ack) { 260 if (chip->irq_mask_ack) {
255 chip->mask_ack(irq); 261 chip->irq_mask_ack(data);
256 } else { 262 } else {
257 chip->mask(irq); 263 chip->irq_mask(data);
258 chip->ack(irq); 264 chip->irq_ack(data);
259 } 265 }
260} 266}
261 267
262noop(irq) 268noop(struct irq_data *data))
263{ 269{
264} 270}
265 271
@@ -278,12 +284,27 @@ noop(irq)
278 <para> 284 <para>
279 The following control flow is implemented (simplified excerpt): 285 The following control flow is implemented (simplified excerpt):
280 <programlisting> 286 <programlisting>
281desc->chip->start(); 287desc->chip->irq_mask();
282handle_IRQ_event(desc->action); 288handle_IRQ_event(desc->action);
283desc->chip->end(); 289desc->chip->irq_unmask();
284 </programlisting> 290 </programlisting>
285 </para> 291 </para>
286 </sect3> 292 </sect3>
293 <sect3 id="Default_FASTEOI_IRQ_flow_handler">
294 <title>Default Fast EOI IRQ flow handler</title>
295 <para>
296 handle_fasteoi_irq provides a generic implementation
297 for interrupts, which only need an EOI at the end of
298 the handler
299 </para>
300 <para>
301 The following control flow is implemented (simplified excerpt):
302 <programlisting>
303handle_IRQ_event(desc->action);
304desc->chip->irq_eoi();
305 </programlisting>
306 </para>
307 </sect3>
287 <sect3 id="Default_Edge_IRQ_flow_handler"> 308 <sect3 id="Default_Edge_IRQ_flow_handler">
288 <title>Default Edge IRQ flow handler</title> 309 <title>Default Edge IRQ flow handler</title>
289 <para> 310 <para>
@@ -294,20 +315,19 @@ desc->chip->end();
294 The following control flow is implemented (simplified excerpt): 315 The following control flow is implemented (simplified excerpt):
295 <programlisting> 316 <programlisting>
296if (desc->status &amp; running) { 317if (desc->status &amp; running) {
297 desc->chip->hold(); 318 desc->chip->irq_mask();
298 desc->status |= pending | masked; 319 desc->status |= pending | masked;
299 return; 320 return;
300} 321}
301desc->chip->start(); 322desc->chip->irq_ack();
302desc->status |= running; 323desc->status |= running;
303do { 324do {
304 if (desc->status &amp; masked) 325 if (desc->status &amp; masked)
305 desc->chip->enable(); 326 desc->chip->irq_unmask();
306 desc->status &amp;= ~pending; 327 desc->status &amp;= ~pending;
307 handle_IRQ_event(desc->action); 328 handle_IRQ_event(desc->action);
308} while (status &amp; pending); 329} while (status &amp; pending);
309desc->status &amp;= ~running; 330desc->status &amp;= ~running;
310desc->chip->end();
311 </programlisting> 331 </programlisting>
312 </para> 332 </para>
313 </sect3> 333 </sect3>
@@ -342,9 +362,9 @@ handle_IRQ_event(desc->action);
342 <para> 362 <para>
343 The following control flow is implemented (simplified excerpt): 363 The following control flow is implemented (simplified excerpt):
344 <programlisting> 364 <programlisting>
345desc->chip->start();
346handle_IRQ_event(desc->action); 365handle_IRQ_event(desc->action);
347desc->chip->end(); 366if (desc->chip->irq_eoi)
367 desc->chip->irq_eoi();
348 </programlisting> 368 </programlisting>
349 </para> 369 </para>
350 </sect3> 370 </sect3>
@@ -375,8 +395,7 @@ desc->chip->end();
375 mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when 395 mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
376 you want to use the delayed interrupt disable feature and your 396 you want to use the delayed interrupt disable feature and your
377 hardware is not capable of retriggering an interrupt.) 397 hardware is not capable of retriggering an interrupt.)
378 The delayed interrupt disable can be runtime enabled, per interrupt, 398 The delayed interrupt disable is not configurable.
379 by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
380 </para> 399 </para>
381 </sect2> 400 </sect2>
382 </sect1> 401 </sect1>
@@ -387,13 +406,13 @@ desc->chip->end();
387 contains all the direct chip relevant functions, which 406 contains all the direct chip relevant functions, which
388 can be utilized by the irq flow implementations. 407 can be utilized by the irq flow implementations.
389 <itemizedlist> 408 <itemizedlist>
390 <listitem><para>ack()</para></listitem> 409 <listitem><para>irq_ack()</para></listitem>
391 <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem> 410 <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
392 <listitem><para>mask()</para></listitem> 411 <listitem><para>irq_mask()</para></listitem>
393 <listitem><para>unmask()</para></listitem> 412 <listitem><para>irq_unmask()</para></listitem>
394 <listitem><para>retrigger() - Optional</para></listitem> 413 <listitem><para>irq_retrigger() - Optional</para></listitem>
395 <listitem><para>set_type() - Optional</para></listitem> 414 <listitem><para>irq_set_type() - Optional</para></listitem>
396 <listitem><para>set_wake() - Optional</para></listitem> 415 <listitem><para>irq_set_wake() - Optional</para></listitem>
397 </itemizedlist> 416 </itemizedlist>
398 These primitives are strictly intended to mean what they say: ack means 417 These primitives are strictly intended to mean what they say: ack means
399 ACK, masking means masking of an IRQ line, etc. It is up to the flow 418 ACK, masking means masking of an IRQ line, etc. It is up to the flow
@@ -458,6 +477,7 @@ desc->chip->end();
458 <para> 477 <para>
459 This chapter contains the autogenerated documentation of the internal functions. 478 This chapter contains the autogenerated documentation of the internal functions.
460 </para> 479 </para>
480!Ikernel/irq/irqdesc.c
461!Ikernel/irq/handle.c 481!Ikernel/irq/handle.c
462!Ikernel/irq/chip.c 482!Ikernel/irq/chip.c
463 </chapter> 483 </chapter>
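
As an aside, here is a minimal sketch of what a driver-side irq_chip looks like once it is written against the struct irq_data callbacks and the handle_fasteoi_irq flow documented in the hunk above. The "foo" controller, its register offsets and the foo_pic type are invented for illustration and are not part of this patch:

/*
 * Hypothetical sketch only: an EOI-driven interrupt controller wired up
 * to the new struct irq_data callbacks and the handle_fasteoi_irq flow.
 * The "foo" device, its register offsets and helpers are assumptions
 * made for the example, not code from this series.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

struct foo_pic {
	void __iomem	*base;
	unsigned int	irq_base;
};

#define FOO_MASK_SET	0x00	/* invented register offsets */
#define FOO_MASK_CLR	0x04
#define FOO_EOI		0x08

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq - pic->irq_base), pic->base + FOO_MASK_SET);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq - pic->irq_base), pic->base + FOO_MASK_CLR);
}

static void foo_irq_eoi(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << (data->irq - pic->irq_base), pic->base + FOO_EOI);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO-PIC",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_eoi	= foo_irq_eoi,
};

/* Each interrupt is bound to the chip and the fasteoi flow handler. */
static void __init foo_pic_setup_irq(unsigned int irq, struct foo_pic *pic)
{
	set_irq_chip_data(irq, pic);
	set_irq_chip_and_handler(irq, &foo_irq_chip, handle_fasteoi_irq);
}
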
diff --git a/MAINTAINERS b/MAINTAINERS
index 668682d1f5fa..62c0acec298d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3210,6 +3210,12 @@ F: drivers/net/irda/
3210F: include/net/irda/ 3210F: include/net/irda/
3211F: net/irda/ 3211F: net/irda/
3212 3212
3213IRQ SUBSYSTEM
3214M: Thomas Gleixner <tglx@linutronix.de>
3215S: Maintained
3216T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
3217F: kernel/irq/
3218
3213ISAPNP 3219ISAPNP
3214M: Jaroslav Kysela <perex@perex.cz> 3220M: Jaroslav Kysela <perex@perex.cz>
3215S: Maintained 3221S: Maintained
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c0d5c3b3a760..5456d11d6ae4 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -157,10 +157,8 @@ void __init init_IRQ(void)
157 struct irq_desc *desc; 157 struct irq_desc *desc;
158 int irq; 158 int irq;
159 159
160 for (irq = 0; irq < nr_irqs; irq++) { 160 for (irq = 0; irq < nr_irqs; irq++)
161 desc = irq_to_desc_alloc_node(irq, 0);
162 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 161 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
163 }
164 162
165 init_arch_irq(); 163 init_arch_irq();
166} 164}
@@ -169,7 +167,7 @@ void __init init_IRQ(void)
169int __init arch_probe_nr_irqs(void) 167int __init arch_probe_nr_irqs(void)
170{ 168{
171 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; 169 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
172 return 0; 170 return nr_irqs;
173} 171}
174#endif 172#endif
175 173
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index dc1c4939b0ce..e3152631eb37 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
67} 67}
68 68
69static struct irq_chip bcmring_irq0_chip = { 69static struct irq_chip bcmring_irq0_chip = {
70 .typename = "ARM-INTC0", 70 .name = "ARM-INTC0",
71 .ack = bcmring_mask_irq0, 71 .ack = bcmring_mask_irq0,
72 .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ 72 .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */
73 .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ 73 .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */
74}; 74};
75 75
76static struct irq_chip bcmring_irq1_chip = { 76static struct irq_chip bcmring_irq1_chip = {
77 .typename = "ARM-INTC1", 77 .name = "ARM-INTC1",
78 .ack = bcmring_mask_irq1, 78 .ack = bcmring_mask_irq1,
79 .mask = bcmring_mask_irq1, 79 .mask = bcmring_mask_irq1,
80 .unmask = bcmring_unmask_irq1, 80 .unmask = bcmring_unmask_irq1,
81}; 81};
82 82
83static struct irq_chip bcmring_irq2_chip = { 83static struct irq_chip bcmring_irq2_chip = {
84 .typename = "ARM-SINTC", 84 .name = "ARM-SINTC",
85 .ack = bcmring_mask_irq2, 85 .ack = bcmring_mask_irq2,
86 .mask = bcmring_mask_irq2, 86 .mask = bcmring_mask_irq2,
87 .unmask = bcmring_unmask_irq2, 87 .unmask = bcmring_unmask_irq2,
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 3c71f776872c..7db26f1f082d 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
51 for_each_online_cpu(j) 51 for_each_online_cpu(j)
52 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 52 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
53#endif 53#endif
54 seq_printf(p, " %14s", irq_desc[i].chip->typename); 54 seq_printf(p, " %14s", irq_desc[i].chip->name);
55 seq_printf(p, " %s", action->name); 55 seq_printf(p, " %s", action->name);
56 56
57 for (action=action->next; action; action = action->next) 57 for (action=action->next; action; action = action->next)
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 922fdfdadeaa..402a59d7219b 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
65 65
66static struct irq_chip m32104ut_irq_type = 66static struct irq_chip m32104ut_irq_type =
67{ 67{
68 .typename = "M32104UT-IRQ", 68 .name = "M32104UT-IRQ",
69 .startup = startup_m32104ut_irq, 69 .startup = startup_m32104ut_irq,
70 .shutdown = shutdown_m32104ut_irq, 70 .shutdown = shutdown_m32104ut_irq,
71 .enable = enable_m32104ut_irq, 71 .enable = enable_m32104ut_irq,
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 9c1bc7487c1e..80b1a026795a 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
71 71
72static struct irq_chip m32700ut_irq_type = 72static struct irq_chip m32700ut_irq_type =
73{ 73{
74 .typename = "M32700UT-IRQ", 74 .name = "M32700UT-IRQ",
75 .startup = startup_m32700ut_irq, 75 .startup = startup_m32700ut_irq,
76 .shutdown = shutdown_m32700ut_irq, 76 .shutdown = shutdown_m32700ut_irq,
77 .enable = enable_m32700ut_irq, 77 .enable = enable_m32700ut_irq,
@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
148 148
149static struct irq_chip m32700ut_pld_irq_type = 149static struct irq_chip m32700ut_pld_irq_type =
150{ 150{
151 .typename = "M32700UT-PLD-IRQ", 151 .name = "M32700UT-PLD-IRQ",
152 .startup = startup_m32700ut_pld_irq, 152 .startup = startup_m32700ut_pld_irq,
153 .shutdown = shutdown_m32700ut_pld_irq, 153 .shutdown = shutdown_m32700ut_pld_irq,
154 .enable = enable_m32700ut_pld_irq, 154 .enable = enable_m32700ut_pld_irq,
@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
217 217
218static struct irq_chip m32700ut_lanpld_irq_type = 218static struct irq_chip m32700ut_lanpld_irq_type =
219{ 219{
220 .typename = "M32700UT-PLD-LAN-IRQ", 220 .name = "M32700UT-PLD-LAN-IRQ",
221 .startup = startup_m32700ut_lanpld_irq, 221 .startup = startup_m32700ut_lanpld_irq,
222 .shutdown = shutdown_m32700ut_lanpld_irq, 222 .shutdown = shutdown_m32700ut_lanpld_irq,
223 .enable = enable_m32700ut_lanpld_irq, 223 .enable = enable_m32700ut_lanpld_irq,
@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
286 286
287static struct irq_chip m32700ut_lcdpld_irq_type = 287static struct irq_chip m32700ut_lcdpld_irq_type =
288{ 288{
289 .typename = "M32700UT-PLD-LCD-IRQ", 289 .name = "M32700UT-PLD-LCD-IRQ",
290 .startup = startup_m32700ut_lcdpld_irq, 290 .startup = startup_m32700ut_lcdpld_irq,
291 .shutdown = shutdown_m32700ut_lcdpld_irq, 291 .shutdown = shutdown_m32700ut_lcdpld_irq,
292 .enable = enable_m32700ut_lcdpld_irq, 292 .enable = enable_m32700ut_lcdpld_irq,
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index fb4b17799b66..ea00c84d6b1b 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)
65 65
66static struct irq_chip mappi_irq_type = 66static struct irq_chip mappi_irq_type =
67{ 67{
68 .typename = "MAPPI-IRQ", 68 .name = "MAPPI-IRQ",
69 .startup = startup_mappi_irq, 69 .startup = startup_mappi_irq,
70 .shutdown = shutdown_mappi_irq, 70 .shutdown = shutdown_mappi_irq,
71 .enable = enable_mappi_irq, 71 .enable = enable_mappi_irq,
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index 6a65eda0a056..c049376d0270 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
72 72
73static struct irq_chip mappi2_irq_type = 73static struct irq_chip mappi2_irq_type =
74{ 74{
75 .typename = "MAPPI2-IRQ", 75 .name = "MAPPI2-IRQ",
76 .startup = startup_mappi2_irq, 76 .startup = startup_mappi2_irq,
77 .shutdown = shutdown_mappi2_irq, 77 .shutdown = shutdown_mappi2_irq,
78 .enable = enable_mappi2_irq, 78 .enable = enable_mappi2_irq,
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 9c337aeac94b..882de25c6e8c 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
72 72
73static struct irq_chip mappi3_irq_type = 73static struct irq_chip mappi3_irq_type =
74{ 74{
75 .typename = "MAPPI3-IRQ", 75 .name = "MAPPI3-IRQ",
76 .startup = startup_mappi3_irq, 76 .startup = startup_mappi3_irq,
77 .shutdown = shutdown_mappi3_irq, 77 .shutdown = shutdown_mappi3_irq,
78 .enable = enable_mappi3_irq, 78 .enable = enable_mappi3_irq,
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index ed865741c38d..d11d93bf74f5 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
63 63
64static struct irq_chip oaks32r_irq_type = 64static struct irq_chip oaks32r_irq_type =
65{ 65{
66 .typename = "OAKS32R-IRQ", 66 .name = "OAKS32R-IRQ",
67 .startup = startup_oaks32r_irq, 67 .startup = startup_oaks32r_irq,
68 .shutdown = shutdown_oaks32r_irq, 68 .shutdown = shutdown_oaks32r_irq,
69 .enable = enable_oaks32r_irq, 69 .enable = enable_oaks32r_irq,
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 80d680657019..5f3402a2fbaf 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)
72 72
73static struct irq_chip opsput_irq_type = 73static struct irq_chip opsput_irq_type =
74{ 74{
75 .typename = "OPSPUT-IRQ", 75 .name = "OPSPUT-IRQ",
76 .startup = startup_opsput_irq, 76 .startup = startup_opsput_irq,
77 .shutdown = shutdown_opsput_irq, 77 .shutdown = shutdown_opsput_irq,
78 .enable = enable_opsput_irq, 78 .enable = enable_opsput_irq,
@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
149 149
150static struct irq_chip opsput_pld_irq_type = 150static struct irq_chip opsput_pld_irq_type =
151{ 151{
152 .typename = "OPSPUT-PLD-IRQ", 152 .name = "OPSPUT-PLD-IRQ",
153 .startup = startup_opsput_pld_irq, 153 .startup = startup_opsput_pld_irq,
154 .shutdown = shutdown_opsput_pld_irq, 154 .shutdown = shutdown_opsput_pld_irq,
155 .enable = enable_opsput_pld_irq, 155 .enable = enable_opsput_pld_irq,
@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
218 218
219static struct irq_chip opsput_lanpld_irq_type = 219static struct irq_chip opsput_lanpld_irq_type =
220{ 220{
221 .typename = "OPSPUT-PLD-LAN-IRQ", 221 .name = "OPSPUT-PLD-LAN-IRQ",
222 .startup = startup_opsput_lanpld_irq, 222 .startup = startup_opsput_lanpld_irq,
223 .shutdown = shutdown_opsput_lanpld_irq, 223 .shutdown = shutdown_opsput_lanpld_irq,
224 .enable = enable_opsput_lanpld_irq, 224 .enable = enable_opsput_lanpld_irq,
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 757302660af8..1beac7a51ed4 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
63 63
64static struct irq_chip mappi_irq_type = 64static struct irq_chip mappi_irq_type =
65{ 65{
66 .typename = "M32700-IRQ", 66 .name = "M32700-IRQ",
67 .startup = startup_mappi_irq, 67 .startup = startup_mappi_irq,
68 .shutdown = shutdown_mappi_irq, 68 .shutdown = shutdown_mappi_irq,
69 .enable = enable_mappi_irq, 69 .enable = enable_mappi_irq,
@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
136 136
137static struct irq_chip m32700ut_pld_irq_type = 137static struct irq_chip m32700ut_pld_irq_type =
138{ 138{
139 .typename = "USRV-PLD-IRQ", 139 .name = "USRV-PLD-IRQ",
140 .startup = startup_m32700ut_pld_irq, 140 .startup = startup_m32700ut_pld_irq,
141 .shutdown = shutdown_m32700ut_pld_irq, 141 .shutdown = shutdown_m32700ut_pld_irq,
142 .enable = enable_m32700ut_pld_irq, 142 .enable = enable_m32700ut_pld_irq,
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 257de1f0692b..ae5bac39b896 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -290,7 +290,7 @@ void __init init_IRQ(void)
290int __init arch_probe_nr_irqs(void) 290int __init arch_probe_nr_irqs(void)
291{ 291{
292 nr_irqs = sh_mv.mv_nr_irqs; 292 nr_irqs = sh_mv.mv_nr_irqs;
293 return 0; 293 return NR_IRQS_LEGACY;
294} 294}
295#endif 295#endif
296 296
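
The arch_probe_nr_irqs() hunks in this series (ARM above, SH here, x86 io_apic further down) stop returning 0 and instead return the number of legacy interrupts. A paraphrased sketch follows, assuming the SPARSE_IRQ early init in kernel/irq/irqdesc.c uses that value as the count of interrupt descriptors to preallocate at boot; the helper name below is simplified, not the exact in-tree one:

/*
 * Paraphrased sketch of how the core is assumed to consume the new
 * return value; the real early_irq_init() in kernel/irq/irqdesc.c
 * differs in detail (allocation helpers, bookkeeping, error handling).
 */
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;

	/*
	 * The arch may lower nr_irqs and reports how many legacy
	 * (preallocated) descriptors it wants set up right away.
	 */
	initcnt = arch_probe_nr_irqs();

	for (i = 0; i < initcnt; i++)
		irq_preallocate_desc(i, node);	/* simplified helper name */

	return arch_early_irq_init();
}
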
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 596c60086930..9a27d563fc30 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
208} 208}
209 209
210static struct irq_chip tile_irq_chip = { 210static struct irq_chip tile_irq_chip = {
211 .typename = "tile_irq_chip", 211 .name = "tile_irq_chip",
212 .ack = tile_irq_chip_ack, 212 .ack = tile_irq_chip_ack,
213 .eoi = tile_irq_chip_eoi, 213 .eoi = tile_irq_chip_eoi,
214 .mask = tile_irq_chip_mask, 214 .mask = tile_irq_chip_mask,
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
288 for_each_online_cpu(j) 288 for_each_online_cpu(j)
289 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 289 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
290#endif 290#endif
291 seq_printf(p, " %14s", irq_desc[i].chip->typename); 291 seq_printf(p, " %14s", irq_desc[i].chip->name);
292 seq_printf(p, " %s", action->name); 292 seq_printf(p, " %s", action->name);
293 293
294 for (action = action->next; action; action = action->next) 294 for (action = action->next; action; action = action->next)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a3f0b04d7101..a746e3037a5b 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v)
46 for_each_online_cpu(j) 46 for_each_online_cpu(j)
47 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 47 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
48#endif 48#endif
49 seq_printf(p, " %14s", irq_desc[i].chip->typename); 49 seq_printf(p, " %14s", irq_desc[i].chip->name);
50 seq_printf(p, " %s", action->name); 50 seq_printf(p, " %s", action->name);
51 51
52 for (action=action->next; action; action = action->next) 52 for (action=action->next; action; action = action->next)
@@ -369,7 +369,7 @@ static void dummy(unsigned int irq)
369 369
370/* This is used for everything else than the timer. */ 370/* This is used for everything else than the timer. */
371static struct irq_chip normal_irq_type = { 371static struct irq_chip normal_irq_type = {
372 .typename = "SIGIO", 372 .name = "SIGIO",
373 .release = free_irq_by_irq_and_dev, 373 .release = free_irq_by_irq_and_dev,
374 .disable = dummy, 374 .disable = dummy,
375 .enable = dummy, 375 .enable = dummy,
@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = {
378}; 378};
379 379
380static struct irq_chip SIGVTALRM_irq_type = { 380static struct irq_chip SIGVTALRM_irq_type = {
381 .typename = "SIGVTALRM", 381 .name = "SIGVTALRM",
382 .release = free_irq_by_irq_and_dev, 382 .release = free_irq_by_irq_and_dev,
383 .shutdown = dummy, /* never called */ 383 .shutdown = dummy, /* never called */
384 .disable = dummy, 384 .disable = dummy,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9a316f..3ec657f7ee70 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -59,6 +59,11 @@ config X86
59 select ANON_INODES 59 select ANON_INODES
60 select HAVE_ARCH_KMEMCHECK 60 select HAVE_ARCH_KMEMCHECK
61 select HAVE_USER_RETURN_NOTIFIER 61 select HAVE_USER_RETURN_NOTIFIER
62 select HAVE_GENERIC_HARDIRQS
63 select HAVE_SPARSE_IRQ
64 select NUMA_IRQ_DESC if (SPARSE_IRQ && NUMA)
65 select GENERIC_IRQ_PROBE
66 select GENERIC_PENDING_IRQ if SMP
62 67
63config INSTRUCTION_DECODER 68config INSTRUCTION_DECODER
64 def_bool (KPROBES || PERF_EVENTS) 69 def_bool (KPROBES || PERF_EVENTS)
@@ -200,20 +205,6 @@ config HAVE_INTEL_TXT
200 def_bool y 205 def_bool y
201 depends on EXPERIMENTAL && DMAR && ACPI 206 depends on EXPERIMENTAL && DMAR && ACPI
202 207
203# Use the generic interrupt handling code in kernel/irq/:
204config GENERIC_HARDIRQS
205 def_bool y
206
207config GENERIC_HARDIRQS_NO__DO_IRQ
208 def_bool y
209
210config GENERIC_IRQ_PROBE
211 def_bool y
212
213config GENERIC_PENDING_IRQ
214 def_bool y
215 depends on GENERIC_HARDIRQS && SMP
216
217config USE_GENERIC_SMP_HELPERS 208config USE_GENERIC_SMP_HELPERS
218 def_bool y 209 def_bool y
219 depends on SMP 210 depends on SMP
@@ -296,23 +287,6 @@ config X86_X2APIC
296 287
297 If you don't know what to do here, say N. 288 If you don't know what to do here, say N.
298 289
299config SPARSE_IRQ
300 bool "Support sparse irq numbering"
301 depends on PCI_MSI || HT_IRQ
302 ---help---
303 This enables support for sparse irqs. This is useful for distro
304 kernels that want to define a high CONFIG_NR_CPUS value but still
305 want to have low kernel memory footprint on smaller machines.
306
307 ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
308 out the irq_desc[] array in a more NUMA-friendly way. )
309
310 If you don't know what to do here, say N.
311
312config NUMA_IRQ_DESC
313 def_bool y
314 depends on SPARSE_IRQ && NUMA
315
316config X86_MPPARSE 290config X86_MPPARSE
317 bool "Enable MPS table" if ACPI 291 bool "Enable MPS table" if ACPI
318 default y 292 default y
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index a69b1ac9eaf8..2fefa501d3ba 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event;
54extern unsigned long apbt_quick_calibrate(void); 54extern unsigned long apbt_quick_calibrate(void);
55extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); 55extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
56extern void apbt_setup_secondary_clock(void); 56extern void apbt_setup_secondary_clock(void);
57extern unsigned int boot_cpu_id;
58 57
59extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); 58extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
60extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); 59extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index b185091bf19c..4fab24de26b1 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int);
32 32
33DECLARE_PER_CPU(int, cpu_state); 33DECLARE_PER_CPU(int, cpu_state);
34 34
35extern unsigned int boot_cpu_id;
36 35
37#endif /* _ASM_X86_CPU_H */ 36#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index f275e2244505..8d841505344e 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -3,4 +3,31 @@
3 3
4#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) 4#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
5 5
6#ifdef CONFIG_INTR_REMAP
7static inline void prepare_irte(struct irte *irte, int vector,
8 unsigned int dest)
9{
10 memset(irte, 0, sizeof(*irte));
11
12 irte->present = 1;
13 irte->dst_mode = apic->irq_dest_mode;
14 /*
15 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
16 * actual level or edge trigger will be setup in the IO-APIC
17 * RTE. This will help simplify level triggered irq migration.
18 * For more details, see the comments (in io_apic.c) explainig IO-APIC
19 * irq migration in the presence of interrupt-remapping.
20 */
21 irte->trigger_mode = 0;
22 irte->dlvry_mode = apic->irq_delivery_mode;
23 irte->vector = vector;
24 irte->dest_id = IRTE_DEST(dest);
25 irte->redir_hint = 1;
26}
27#else
28static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
29{
30}
31#endif
32
6#endif /* _ASM_X86_IRQ_REMAPPING_H */ 33#endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 8dd77800ff5d..08f75fb4f509 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -343,7 +343,7 @@ void apbt_setup_secondary_clock(void)
343 343
344 /* Don't register boot CPU clockevent */ 344 /* Don't register boot CPU clockevent */
345 cpu = smp_processor_id(); 345 cpu = smp_processor_id();
346 if (cpu == boot_cpu_id) 346 if (!cpu)
347 return; 347 return;
348 /* 348 /*
349 * We need to calculate the scaled math multiplication factor for 349 * We need to calculate the scaled math multiplication factor for
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e3b534cda49a..8cf86fb3b4e3 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1665,10 +1665,7 @@ int __init APIC_init_uniprocessor(void)
1665 } 1665 }
1666#endif 1666#endif
1667 1667
1668#ifndef CONFIG_SMP
1669 enable_IR_x2apic();
1670 default_setup_apic_routing(); 1668 default_setup_apic_routing();
1671#endif
1672 1669
1673 verify_local_APIC(); 1670 verify_local_APIC();
1674 connect_bsp_APIC(); 1671 connect_bsp_APIC();
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5c5b8f3dddb5..7556eb7a1a47 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -162,7 +162,7 @@ int __init arch_early_irq_init(void)
162 162
163 cfg = irq_cfgx; 163 cfg = irq_cfgx;
164 count = ARRAY_SIZE(irq_cfgx); 164 count = ARRAY_SIZE(irq_cfgx);
165 node= cpu_to_node(boot_cpu_id); 165 node = cpu_to_node(0);
166 166
167 for (i = 0; i < count; i++) { 167 for (i = 0; i < count; i++) {
168 desc = irq_to_desc(i); 168 desc = irq_to_desc(i);
@@ -1382,21 +1382,7 @@ int setup_ioapic_entry(int apic_id, int irq,
1382 if (index < 0) 1382 if (index < 0)
1383 panic("Failed to allocate IRTE for ioapic %d\n", apic_id); 1383 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1384 1384
1385 memset(&irte, 0, sizeof(irte)); 1385 prepare_irte(&irte, vector, destination);
1386
1387 irte.present = 1;
1388 irte.dst_mode = apic->irq_dest_mode;
1389 /*
1390 * Trigger mode in the IRTE will always be edge, and the
1391 * actual level or edge trigger will be setup in the IO-APIC
1392 * RTE. This will help simplify level triggered irq migration.
1393 * For more details, see the comments above explainig IO-APIC
1394 * irq migration in the presence of interrupt-remapping.
1395 */
1396 irte.trigger_mode = 0;
1397 irte.dlvry_mode = apic->irq_delivery_mode;
1398 irte.vector = vector;
1399 irte.dest_id = IRTE_DEST(destination);
1400 1386
1401 /* Set source-id of interrupt request */ 1387 /* Set source-id of interrupt request */
1402 set_ioapic_sid(&irte, apic_id); 1388 set_ioapic_sid(&irte, apic_id);
@@ -1488,7 +1474,7 @@ static void __init setup_IO_APIC_irqs(void)
1488 int notcon = 0; 1474 int notcon = 0;
1489 struct irq_desc *desc; 1475 struct irq_desc *desc;
1490 struct irq_cfg *cfg; 1476 struct irq_cfg *cfg;
1491 int node = cpu_to_node(boot_cpu_id); 1477 int node = cpu_to_node(0);
1492 1478
1493 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1479 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1494 1480
@@ -1553,7 +1539,7 @@ static void __init setup_IO_APIC_irqs(void)
1553void setup_IO_APIC_irq_extra(u32 gsi) 1539void setup_IO_APIC_irq_extra(u32 gsi)
1554{ 1540{
1555 int apic_id = 0, pin, idx, irq; 1541 int apic_id = 0, pin, idx, irq;
1556 int node = cpu_to_node(boot_cpu_id); 1542 int node = cpu_to_node(0);
1557 struct irq_desc *desc; 1543 struct irq_desc *desc;
1558 struct irq_cfg *cfg; 1544 struct irq_cfg *cfg;
1559 1545
@@ -2932,7 +2918,7 @@ static inline void __init check_timer(void)
2932{ 2918{
2933 struct irq_desc *desc = irq_to_desc(0); 2919 struct irq_desc *desc = irq_to_desc(0);
2934 struct irq_cfg *cfg = desc->chip_data; 2920 struct irq_cfg *cfg = desc->chip_data;
2935 int node = cpu_to_node(boot_cpu_id); 2921 int node = cpu_to_node(0);
2936 int apic1, pin1, apic2, pin2; 2922 int apic1, pin1, apic2, pin2;
2937 unsigned long flags; 2923 unsigned long flags;
2938 int no_pin1 = 0; 2924 int no_pin1 = 0;
@@ -3286,7 +3272,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3286 3272
3287int create_irq(void) 3273int create_irq(void)
3288{ 3274{
3289 int node = cpu_to_node(boot_cpu_id); 3275 int node = cpu_to_node(0);
3290 unsigned int irq_want; 3276 unsigned int irq_want;
3291 int irq; 3277 int irq;
3292 3278
@@ -3340,14 +3326,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3340 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3326 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3341 BUG_ON(ir_index == -1); 3327 BUG_ON(ir_index == -1);
3342 3328
3343 memset (&irte, 0, sizeof(irte)); 3329 prepare_irte(&irte, cfg->vector, dest);
3344
3345 irte.present = 1;
3346 irte.dst_mode = apic->irq_dest_mode;
3347 irte.trigger_mode = 0; /* edge */
3348 irte.dlvry_mode = apic->irq_delivery_mode;
3349 irte.vector = cfg->vector;
3350 irte.dest_id = IRTE_DEST(dest);
3351 3330
3352 /* Set source-id of interrupt request */ 3331 /* Set source-id of interrupt request */
3353 if (pdev) 3332 if (pdev)
@@ -3885,7 +3864,7 @@ int __init arch_probe_nr_irqs(void)
3885 if (nr < nr_irqs) 3864 if (nr < nr_irqs)
3886 nr_irqs = nr; 3865 nr_irqs = nr;
3887 3866
3888 return 0; 3867 return NR_IRQS_LEGACY;
3889} 3868}
3890#endif 3869#endif
3891 3870
@@ -3908,7 +3887,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
3908 if (dev) 3887 if (dev)
3909 node = dev_to_node(dev); 3888 node = dev_to_node(dev);
3910 else 3889 else
3911 node = cpu_to_node(boot_cpu_id); 3890 node = cpu_to_node(0);
3912 3891
3913 desc = irq_to_desc_alloc_node(irq, node); 3892 desc = irq_to_desc_alloc_node(irq, node);
3914 if (!desc) { 3893 if (!desc) {
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 83e9be4778e2..f9e4e6a54073 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -54,6 +54,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
54 */ 54 */
55void __init default_setup_apic_routing(void) 55void __init default_setup_apic_routing(void)
56{ 56{
57
58 enable_IR_x2apic();
59
57#ifdef CONFIG_X86_X2APIC 60#ifdef CONFIG_X86_X2APIC
58 if (x2apic_mode 61 if (x2apic_mode
59#ifdef CONFIG_X86_UV 62#ifdef CONFIG_X86_UV
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..a8b4d91b8394 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
148{ 148{
149#ifdef CONFIG_SMP 149#ifdef CONFIG_SMP
150 /* calling is from identify_secondary_cpu() ? */ 150 /* calling is from identify_secondary_cpu() ? */
151 if (c->cpu_index == boot_cpu_id) 151 if (!c->cpu_index)
152 return; 152 return;
153 153
154 /* 154 /*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f2f9ac7da25c..15c671385f59 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
665 this_cpu->c_early_init(c); 665 this_cpu->c_early_init(c);
666 666
667#ifdef CONFIG_SMP 667#ifdef CONFIG_SMP
668 c->cpu_index = boot_cpu_id; 668 c->cpu_index = 0;
669#endif 669#endif
670 filter_cpuid_features(c, false); 670 filter_cpuid_features(c, false);
671} 671}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b4389441efbb..695f17731e23 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -170,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
170{ 170{
171#ifdef CONFIG_SMP 171#ifdef CONFIG_SMP
172 /* calling is from identify_secondary_cpu() ? */ 172 /* calling is from identify_secondary_cpu() ? */
173 if (c->cpu_index == boot_cpu_id) 173 if (!c->cpu_index)
174 return; 174 return;
175 175
176 /* 176 /*
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index ebdb85cf2686..76b8cd953dee 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -97,7 +97,6 @@ static void __init nvidia_bugs(int num, int slot, int func)
97} 97}
98 98
99#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) 99#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
100#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
101static u32 __init ati_ixp4x0_rev(int num, int slot, int func) 100static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
102{ 101{
103 u32 d; 102 u32 d;
@@ -115,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
115 d &= 0xff; 114 d &= 0xff;
116 return d; 115 return d;
117} 116}
118#endif
119 117
120static void __init ati_bugs(int num, int slot, int func) 118static void __init ati_bugs(int num, int slot, int func)
121{ 119{
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 035c8c529181..b3ea9db39db6 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -36,7 +36,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
36 if (!page) 36 if (!page)
37 goto out; 37 goto out;
38 pud = (pud_t *)page_address(page); 38 pud = (pud_t *)page_address(page);
39 memset(pud, 0, PAGE_SIZE); 39 clear_page(pud);
40 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); 40 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
41 } 41 }
42 pud = pud_offset(pgd, addr); 42 pud = pud_offset(pgd, addr);
@@ -45,7 +45,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
45 if (!page) 45 if (!page)
46 goto out; 46 goto out;
47 pmd = (pmd_t *)page_address(page); 47 pmd = (pmd_t *)page_address(page);
48 memset(pmd, 0, PAGE_SIZE); 48 clear_page(pmd);
49 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 49 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
50 } 50 }
51 pmd = pmd_offset(pud, addr); 51 pmd = pmd_offset(pud, addr);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e3af342fe83a..7a4cf14223ba 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -84,7 +84,7 @@ static int __init reboot_setup(char *str)
84 } 84 }
85 /* we will leave sorting out the final value 85 /* we will leave sorting out the final value
86 when we are ready to reboot, since we might not 86 when we are ready to reboot, since we might not
87 have set up boot_cpu_id or smp_num_cpu */ 87 have detected BSP APIC ID or smp_num_cpu */
88 break; 88 break;
89#endif /* CONFIG_SMP */ 89#endif /* CONFIG_SMP */
90 90
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c3a4fbb2b996..7d5ee08c982d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -125,7 +125,6 @@ unsigned long max_pfn_mapped;
125RESERVE_BRK(dmi_alloc, 65536); 125RESERVE_BRK(dmi_alloc, 65536);
126#endif 126#endif
127 127
128unsigned int boot_cpu_id __read_mostly;
129 128
130static __initdata unsigned long _brk_start = (unsigned long)__brk_base; 129static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
131unsigned long _brk_end = (unsigned long)__brk_base; 130unsigned long _brk_end = (unsigned long)__brk_base;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a60df9ae6454..2335c15c93a4 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void)
253 * Up to this point, the boot CPU has been using .init.data 253 * Up to this point, the boot CPU has been using .init.data
254 * area. Reload any changed state for the boot CPU. 254 * area. Reload any changed state for the boot CPU.
255 */ 255 */
256 if (cpu == boot_cpu_id) 256 if (!cpu)
257 switch_to_new_gdt(cpu); 257 switch_to_new_gdt(cpu);
258 } 258 }
259 259
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4dd708..87a8c6b00f8d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1109,8 +1109,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1109 } 1109 }
1110 set_cpu_sibling_map(0); 1110 set_cpu_sibling_map(0);
1111 1111
1112 enable_IR_x2apic();
1113 default_setup_apic_routing();
1114 1112
1115 if (smp_sanity_check(max_cpus) < 0) { 1113 if (smp_sanity_check(max_cpus) < 0) {
1116 printk(KERN_INFO "SMP disabled\n"); 1114 printk(KERN_INFO "SMP disabled\n");
@@ -1118,6 +1116,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1118 goto out; 1116 goto out;
1119 } 1117 }
1120 1118
1119 default_setup_apic_routing();
1120
1121 preempt_disable(); 1121 preempt_disable();
1122 if (read_apic_id() != boot_cpu_physical_apicid) { 1122 if (read_apic_id() != boot_cpu_physical_apicid) {
1123 panic("Boot APIC ID in local APIC unexpected (%d vs %d)", 1123 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 77d8c0f4817d..22b06f7660f4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1056,14 +1056,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
1056 1056
1057 vcpu->arch.apic = apic; 1057 vcpu->arch.apic = apic;
1058 1058
1059 apic->regs_page = alloc_page(GFP_KERNEL); 1059 apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
1060 if (apic->regs_page == NULL) { 1060 if (apic->regs_page == NULL) {
1061 printk(KERN_ERR "malloc apic regs error for vcpu %x\n", 1061 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
1062 vcpu->vcpu_id); 1062 vcpu->vcpu_id);
1063 goto nomem_free_apic; 1063 goto nomem_free_apic;
1064 } 1064 }
1065 apic->regs = page_address(apic->regs_page); 1065 apic->regs = page_address(apic->regs_page);
1066 memset(apic->regs, 0, PAGE_SIZE);
1067 apic->vcpu = vcpu; 1066 apic->vcpu = vcpu;
1068 1067
1069 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 1068 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bca79091b9d6..558f2d332076 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -67,7 +67,7 @@ static __init void *alloc_low_page(void)
67 panic("alloc_low_page: ran out of memory"); 67 panic("alloc_low_page: ran out of memory");
68 68
69 adr = __va(pfn * PAGE_SIZE); 69 adr = __va(pfn * PAGE_SIZE);
70 memset(adr, 0, PAGE_SIZE); 70 clear_page(adr);
71 return adr; 71 return adr;
72} 72}
73 73
@@ -558,7 +558,7 @@ char swsusp_pg_dir[PAGE_SIZE]
558 558
559static inline void save_pg_dir(void) 559static inline void save_pg_dir(void)
560{ 560{
561 memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); 561 copy_page(swsusp_pg_dir, swapper_pg_dir);
562} 562}
563#else /* !CONFIG_ACPI_SLEEP */ 563#else /* !CONFIG_ACPI_SLEEP */
564static inline void save_pg_dir(void) 564static inline void save_pg_dir(void)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a6674689a20..7c48ad4faca3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -293,7 +293,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
293 panic("alloc_low_page: ran out of memory"); 293 panic("alloc_low_page: ran out of memory");
294 294
295 adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); 295 adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
296 memset(adr, 0, PAGE_SIZE); 296 clear_page(adr);
297 *phys = pfn * PAGE_SIZE; 297 *phys = pfn * PAGE_SIZE;
298 return adr; 298 return adr;
299} 299}
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 970ed579d4e4..240f86462a83 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -54,8 +54,8 @@ static __init int find_northbridge(void)
54static __init void early_get_boot_cpu_id(void) 54static __init void early_get_boot_cpu_id(void)
55{ 55{
56 /* 56 /*
57 * need to get boot_cpu_id so can use that to create apicid_to_node 57 * need to get the APIC ID of the BSP so can use that to
58 * in k8_scan_nodes() 58 * create apicid_to_node in k8_scan_nodes()
59 */ 59 */
60#ifdef CONFIG_X86_MPPARSE 60#ifdef CONFIG_X86_MPPARSE
61 /* 61 /*
@@ -212,7 +212,7 @@ int __init k8_scan_nodes(void)
212 bits = boot_cpu_data.x86_coreid_bits; 212 bits = boot_cpu_data.x86_coreid_bits;
213 cores = (1<<bits); 213 cores = (1<<bits);
214 apicid_base = 0; 214 apicid_base = 0;
215 /* need to get boot_cpu_id early for system with apicid lifting */ 215 /* get the APIC ID of the BSP early for systems with apicid lifting */
216 early_get_boot_cpu_id(); 216 early_get_boot_cpu_id();
217 if (boot_cpu_physical_apicid > 0) { 217 if (boot_cpu_physical_apicid > 0) {
218 pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); 218 pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index c64a5d387de5..87508886cbbd 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
92 for_each_online_cpu(j) 92 for_each_online_cpu(j)
93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
94#endif 94#endif
95 seq_printf(p, " %14s", irq_desc[i].chip->typename); 95 seq_printf(p, " %14s", irq_desc[i].chip->name);
96 seq_printf(p, " %s", action->name); 96 seq_printf(p, " %s", action->name);
97 97
98 for (action=action->next; action; action = action->next) 98 for (action=action->next; action; action = action->next)
diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h
index d4c50512a1ff..88c9423500d8 100644
--- a/drivers/isdn/act2000/act2000.h
+++ b/drivers/isdn/act2000/act2000.h
@@ -141,9 +141,9 @@ typedef struct irq_data_isa {
141 __u8 rcvhdr[8]; 141 __u8 rcvhdr[8];
142} irq_data_isa; 142} irq_data_isa;
143 143
144typedef union irq_data { 144typedef union act2000_irq_data {
145 irq_data_isa isa; 145 irq_data_isa isa;
146} irq_data; 146} act2000_irq_data;
147 147
148/* 148/*
149 * Per card driver data 149 * Per card driver data
@@ -176,7 +176,7 @@ typedef struct act2000_card {
176 char *status_buf_read; 176 char *status_buf_read;
177 char *status_buf_write; 177 char *status_buf_write;
178 char *status_buf_end; 178 char *status_buf_end;
179 irq_data idat; /* Data used for IRQ handler */ 179 act2000_irq_data idat; /* Data used for IRQ handler */
180 isdn_if interface; /* Interface to upper layer */ 180 isdn_if interface; /* Interface to upper layer */
181 char regname[35]; /* Name used for request_region */ 181 char regname[35]; /* Name used for request_region */
182} act2000_card; 182} act2000_card;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 6f9afcd5ca4e..b133378d4dc9 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -801,6 +801,16 @@ static void closecard(int cardnr)
801 ll_unload(csta); 801 ll_unload(csta);
802} 802}
803 803
804static irqreturn_t card_irq(int intno, void *dev_id)
805{
806 struct IsdnCardState *cs = dev_id;
807 irqreturn_t ret = cs->irq_func(intno, cs);
808
809 if (ret == IRQ_HANDLED)
810 cs->irq_cnt++;
811 return ret;
812}
813
804static int init_card(struct IsdnCardState *cs) 814static int init_card(struct IsdnCardState *cs)
805{ 815{
806 int irq_cnt, cnt = 3, ret; 816 int irq_cnt, cnt = 3, ret;
@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs)
809 ret = cs->cardmsg(cs, CARD_INIT, NULL); 819 ret = cs->cardmsg(cs, CARD_INIT, NULL);
810 return(ret); 820 return(ret);
811 } 821 }
812 irq_cnt = kstat_irqs(cs->irq); 822 irq_cnt = cs->irq_cnt = 0;
813 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], 823 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
814 cs->irq, irq_cnt); 824 cs->irq, irq_cnt);
815 if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { 825 if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
816 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", 826 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
817 cs->irq); 827 cs->irq);
818 return 1; 828 return 1;
@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs)
822 /* Timeout 10ms */ 832 /* Timeout 10ms */
823 msleep(10); 833 msleep(10);
824 printk(KERN_INFO "%s: IRQ %d count %d\n", 834 printk(KERN_INFO "%s: IRQ %d count %d\n",
825 CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); 835 CardType[cs->typ], cs->irq, cs->irq_cnt);
826 if (kstat_irqs(cs->irq) == irq_cnt) { 836 if (cs->irq_cnt == irq_cnt) {
827 printk(KERN_WARNING 837 printk(KERN_WARNING
828 "%s: IRQ(%d) getting no interrupts during init %d\n", 838 "%s: IRQ(%d) getting no interrupts during init %d\n",
829 CardType[cs->typ], cs->irq, 4 - cnt); 839 CardType[cs->typ], cs->irq, 4 - cnt);
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 832a87855ffb..32ab3924aa73 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -959,6 +959,7 @@ struct IsdnCardState {
959 u_long event; 959 u_long event;
960 struct work_struct tqueue; 960 struct work_struct tqueue;
961 struct timer_list dbusytimer; 961 struct timer_list dbusytimer;
962 unsigned int irq_cnt;
962#ifdef ERROR_STATISTIC 963#ifdef ERROR_STATISTIC
963 int err_crc; 964 int err_crc;
964 int err_tx; 965 int err_tx;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 097f24d8bceb..b9fda7018cef 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -78,7 +78,7 @@ struct sih {
78 u8 irq_lines; /* number of supported irq lines */ 78 u8 irq_lines; /* number of supported irq lines */
79 79
80 /* SIR ignored -- set interrupt, for testing only */ 80 /* SIR ignored -- set interrupt, for testing only */
81 struct irq_data { 81 struct sih_irq_data {
82 u8 isr_offset; 82 u8 isr_offset;
83 u8 imr_offset; 83 u8 imr_offset;
84 } mask[2]; 84 } mask[2];
@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
810 twl4030_irq_chip = dummy_irq_chip; 810 twl4030_irq_chip = dummy_irq_chip;
811 twl4030_irq_chip.name = "twl4030"; 811 twl4030_irq_chip.name = "twl4030";
812 812
813 twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; 813 twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
814 814
815 for (i = irq_base; i < irq_end; i++) { 815 for (i = irq_base; i < irq_end; i++) {
816 set_irq_chip_and_handler(i, &twl4030_irq_chip, 816 set_irq_chip_and_handler(i, &twl4030_irq_chip,
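
The twl4030 hunk above copies dummy_irq_chip.irq_ack, i.e. it relies on the dummy chip already being converted to the new callbacks (the new kernel/irq/dummychip.c in the diffstat). A hypothetical minimal sketch of such a no-op chip, not the in-tree file itself:

/* Hypothetical no-op chip in the new irq_data style, for illustration. */
static void noop(struct irq_data *data)
{
}

static unsigned int noop_ret(struct irq_data *data)
{
	return 0;
}

struct irq_chip dummy_irq_chip_sketch = {
	.name		= "dummy",
	.irq_startup	= noop_ret,
	.irq_ack	= noop,
	.irq_mask	= noop,
	.irq_unmask	= noop,
};
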
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c03243ad84b4..ef878823ee3b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
72#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ 72#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
73#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ 73#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
74 74
75#define IRQF_MODIFY_MASK \
76 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
77 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
78
75#ifdef CONFIG_IRQ_PER_CPU 79#ifdef CONFIG_IRQ_PER_CPU
76# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 80# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
77# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 81# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -80,36 +84,82 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
80# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING 84# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
81#endif 85#endif
82 86
83struct proc_dir_entry;
84struct msi_desc; 87struct msi_desc;
88struct irq_2_iommu;
89
90/**
91 * struct irq_data - per irq and irq chip data passed down to chip functions
92 * @irq: interrupt number
93 * @node: node index useful for balancing
94 * @chip: low level interrupt hardware access
95 * @handler_data: per-IRQ data for the irq_chip methods
96 * @chip_data: platform-specific per-chip private data for the chip
97 * methods, to allow shared chip implementations
98 * @msi_desc: MSI descriptor
99 * @affinity: IRQ affinity on SMP
100 * @irq_2_iommu: iommu with this irq
101 *
102 * The fields here need to overlay the ones in irq_desc until we
103 * cleaned up the direct references and switched everything over to
104 * irq_data.
105 */
106struct irq_data {
107 unsigned int irq;
108 unsigned int node;
109 struct irq_chip *chip;
110 void *handler_data;
111 void *chip_data;
112 struct msi_desc *msi_desc;
113#ifdef CONFIG_SMP
114 cpumask_var_t affinity;
115#endif
116#ifdef CONFIG_INTR_REMAP
117 struct irq_2_iommu *irq_2_iommu;
118#endif
119};
85 120
86/** 121/**
87 * struct irq_chip - hardware interrupt chip descriptor 122 * struct irq_chip - hardware interrupt chip descriptor
88 * 123 *
89 * @name: name for /proc/interrupts 124 * @name: name for /proc/interrupts
90 * @startup: start up the interrupt (defaults to ->enable if NULL) 125 * @startup: deprecated, replaced by irq_startup
91 * @shutdown: shut down the interrupt (defaults to ->disable if NULL) 126 * @shutdown: deprecated, replaced by irq_shutdown
92 * @enable: enable the interrupt (defaults to chip->unmask if NULL) 127 * @enable: deprecated, replaced by irq_enable
93 * @disable: disable the interrupt 128 * @disable: deprecated, replaced by irq_disable
94 * @ack: start of a new interrupt 129 * @ack: deprecated, replaced by irq_ack
95 * @mask: mask an interrupt source 130 * @mask: deprecated, replaced by irq_mask
96 * @mask_ack: ack and mask an interrupt source 131 * @mask_ack: deprecated, replaced by irq_mask_ack
97 * @unmask: unmask an interrupt source 132 * @unmask: deprecated, replaced by irq_unmask
98 * @eoi: end of interrupt - chip level 133 * @eoi: deprecated, replaced by irq_eoi
99 * @end: end of interrupt - flow level 134 * @end: deprecated, will go away with __do_IRQ()
100 * @set_affinity: set the CPU affinity on SMP machines 135 * @set_affinity: deprecated, replaced by irq_set_affinity
101 * @retrigger: resend an IRQ to the CPU 136 * @retrigger: deprecated, replaced by irq_retrigger
102 * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ 137 * @set_type: deprecated, replaced by irq_set_type
103 * @set_wake: enable/disable power-management wake-on of an IRQ 138 * @set_wake: deprecated, replaced by irq_set_wake
139 * @bus_lock: deprecated, replaced by irq_bus_lock
140 * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock
104 * 141 *
105 * @bus_lock: function to lock access to slow bus (i2c) chips 142 * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
106 * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips 143 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
144 * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
145 * @irq_disable: disable the interrupt
146 * @irq_ack: start of a new interrupt
147 * @irq_mask: mask an interrupt source
148 * @irq_mask_ack: ack and mask an interrupt source
149 * @irq_unmask: unmask an interrupt source
150 * @irq_eoi: end of interrupt
151 * @irq_set_affinity: set the CPU affinity on SMP machines
152 * @irq_retrigger: resend an IRQ to the CPU
153 * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
154 * @irq_set_wake: enable/disable power-management wake-on of an IRQ
155 * @irq_bus_lock: function to lock access to slow bus (i2c) chips
156 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
107 * 157 *
108 * @release: release function solely used by UML 158 * @release: release function solely used by UML
109 * @typename: obsoleted by name, kept as migration helper
110 */ 159 */
111struct irq_chip { 160struct irq_chip {
112 const char *name; 161 const char *name;
162#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
113 unsigned int (*startup)(unsigned int irq); 163 unsigned int (*startup)(unsigned int irq);
114 void (*shutdown)(unsigned int irq); 164 void (*shutdown)(unsigned int irq);
115 void (*enable)(unsigned int irq); 165 void (*enable)(unsigned int irq);
@@ -130,154 +180,66 @@ struct irq_chip {
130 180
131 void (*bus_lock)(unsigned int irq); 181 void (*bus_lock)(unsigned int irq);
132 void (*bus_sync_unlock)(unsigned int irq); 182 void (*bus_sync_unlock)(unsigned int irq);
183#endif
184 unsigned int (*irq_startup)(struct irq_data *data);
185 void (*irq_shutdown)(struct irq_data *data);
186 void (*irq_enable)(struct irq_data *data);
187 void (*irq_disable)(struct irq_data *data);
188
189 void (*irq_ack)(struct irq_data *data);
190 void (*irq_mask)(struct irq_data *data);
191 void (*irq_mask_ack)(struct irq_data *data);
192 void (*irq_unmask)(struct irq_data *data);
193 void (*irq_eoi)(struct irq_data *data);
194
195 int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
196 int (*irq_retrigger)(struct irq_data *data);
197 int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
198 int (*irq_set_wake)(struct irq_data *data, unsigned int on);
199
200 void (*irq_bus_lock)(struct irq_data *data);
201 void (*irq_bus_sync_unlock)(struct irq_data *data);
133 202
134 /* Currently used only by UML, might disappear one day.*/ 203 /* Currently used only by UML, might disappear one day.*/
135#ifdef CONFIG_IRQ_RELEASE_METHOD 204#ifdef CONFIG_IRQ_RELEASE_METHOD
136 void (*release)(unsigned int irq, void *dev_id); 205 void (*release)(unsigned int irq, void *dev_id);
137#endif 206#endif
138 /*
139 * For compatibility, ->typename is copied into ->name.
140 * Will disappear.
141 */
142 const char *typename;
143}; 207};
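
As a hedged sketch of the new scheme only (names invented), a freshly written chip would populate just the irq_* callbacks and leave the deprecated ones untouched:

#include <linux/irq.h>

static void demochip_irq_ack(struct irq_data *data)	{ /* clear the hw latch here */ }
static void demochip_irq_mask(struct irq_data *data)	{ /* set the hw mask bit here */ }
static void demochip_irq_unmask(struct irq_data *data)	{ /* clear the hw mask bit here */ }

static struct irq_chip demochip = {
	.name		= "demochip",
	.irq_ack	= demochip_irq_ack,
	.irq_mask	= demochip_irq_mask,
	.irq_unmask	= demochip_irq_unmask,
};
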
144 208
145struct timer_rand_state; 209/* This include will go away once we isolated irq_desc usage to core code */
146struct irq_2_iommu; 210#include <linux/irqdesc.h>
147/**
148 * struct irq_desc - interrupt descriptor
149 * @irq: interrupt number for this descriptor
150 * @timer_rand_state: pointer to timer rand state struct
151 * @kstat_irqs: irq stats per cpu
152 * @irq_2_iommu: iommu with this irq
153 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
154 * @chip: low level interrupt hardware access
155 * @msi_desc: MSI descriptor
156 * @handler_data: per-IRQ data for the irq_chip methods
157 * @chip_data: platform-specific per-chip private data for the chip
158 * methods, to allow shared chip implementations
159 * @action: the irq action chain
160 * @status: status information
161 * @depth: disable-depth, for nested irq_disable() calls
162 * @wake_depth: enable depth, for multiple set_irq_wake() callers
163 * @irq_count: stats field to detect stalled irqs
164 * @last_unhandled: aging timer for unhandled count
165 * @irqs_unhandled: stats field for spurious unhandled interrupts
166 * @lock: locking for SMP
167 * @affinity: IRQ affinity on SMP
168 * @node: node index useful for balancing
169 * @pending_mask: pending rebalanced interrupts
170 * @threads_active: number of irqaction threads currently running
171 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
172 * @dir: /proc/irq/ procfs entry
173 * @name: flow handler name for /proc/interrupts output
174 */
175struct irq_desc {
176 unsigned int irq;
177 struct timer_rand_state *timer_rand_state;
178 unsigned int *kstat_irqs;
179#ifdef CONFIG_INTR_REMAP
180 struct irq_2_iommu *irq_2_iommu;
181#endif
182 irq_flow_handler_t handle_irq;
183 struct irq_chip *chip;
184 struct msi_desc *msi_desc;
185 void *handler_data;
186 void *chip_data;
187 struct irqaction *action; /* IRQ action list */
188 unsigned int status; /* IRQ status */
189
190 unsigned int depth; /* nested irq disables */
191 unsigned int wake_depth; /* nested wake enables */
192 unsigned int irq_count; /* For detecting broken IRQs */
193 unsigned long last_unhandled; /* Aging timer for unhandled count */
194 unsigned int irqs_unhandled;
195 raw_spinlock_t lock;
196#ifdef CONFIG_SMP
197 cpumask_var_t affinity;
198 const struct cpumask *affinity_hint;
199 unsigned int node;
200#ifdef CONFIG_GENERIC_PENDING_IRQ
201 cpumask_var_t pending_mask;
202#endif
203#endif
204 atomic_t threads_active;
205 wait_queue_head_t wait_for_threads;
206#ifdef CONFIG_PROC_FS
207 struct proc_dir_entry *dir;
208#endif
209 const char *name;
210} ____cacheline_internodealigned_in_smp;
211 211
212extern void arch_init_copy_chip_data(struct irq_desc *old_desc, 212/*
213 struct irq_desc *desc, int node); 213 * Pick up the arch-dependent methods:
214extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); 214 */
215#include <asm/hw_irq.h>
215 216
216#ifndef CONFIG_SPARSE_IRQ 217#ifndef NR_IRQS_LEGACY
217extern struct irq_desc irq_desc[NR_IRQS]; 218# define NR_IRQS_LEGACY 0
218#endif 219#endif
219 220
220#ifdef CONFIG_NUMA_IRQ_DESC 221#ifndef ARCH_IRQ_INIT_FLAGS
221extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); 222# define ARCH_IRQ_INIT_FLAGS 0
222#else
223static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
224{
225 return desc;
226}
227#endif 223#endif
228 224
229extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 225#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
230
231/*
232 * Pick up the arch-dependent methods:
233 */
234#include <asm/hw_irq.h>
235 226
227struct irqaction;
236extern int setup_irq(unsigned int irq, struct irqaction *new); 228extern int setup_irq(unsigned int irq, struct irqaction *new);
237extern void remove_irq(unsigned int irq, struct irqaction *act); 229extern void remove_irq(unsigned int irq, struct irqaction *act);
238 230
239#ifdef CONFIG_GENERIC_HARDIRQS 231#ifdef CONFIG_GENERIC_HARDIRQS
240 232
241#ifdef CONFIG_SMP 233#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
242
243#ifdef CONFIG_GENERIC_PENDING_IRQ
244
245void move_native_irq(int irq); 234void move_native_irq(int irq);
246void move_masked_irq(int irq); 235void move_masked_irq(int irq);
247 236#else
248#else /* CONFIG_GENERIC_PENDING_IRQ */ 237static inline void move_native_irq(int irq) { }
249 238static inline void move_masked_irq(int irq) { }
250static inline void move_irq(int irq) 239#endif
251{
252}
253
254static inline void move_native_irq(int irq)
255{
256}
257
258static inline void move_masked_irq(int irq)
259{
260}
261
262#endif /* CONFIG_GENERIC_PENDING_IRQ */
263
264#else /* CONFIG_SMP */
265
266#define move_native_irq(x)
267#define move_masked_irq(x)
268
269#endif /* CONFIG_SMP */
270 240
271extern int no_irq_affinity; 241extern int no_irq_affinity;
272 242
273static inline int irq_balancing_disabled(unsigned int irq)
274{
275 struct irq_desc *desc;
276
277 desc = irq_to_desc(irq);
278 return desc->status & IRQ_NO_BALANCING_MASK;
279}
280
281/* Handle irq action chains: */ 243/* Handle irq action chains: */
282extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); 244extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
283 245
@@ -293,42 +255,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
293extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 255extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
294extern void handle_nested_irq(unsigned int irq); 256extern void handle_nested_irq(unsigned int irq);
295 257
296/*
297 * Monolithic do_IRQ implementation.
298 */
299#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
300extern unsigned int __do_IRQ(unsigned int irq);
301#endif
302
303/*
304 * Architectures call this to let the generic IRQ layer
305 * handle an interrupt. If the descriptor is attached to an
306 * irqchip-style controller then we call the ->handle_irq() handler,
307 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
308 */
309static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
310{
311#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
312 desc->handle_irq(irq, desc);
313#else
314 if (likely(desc->handle_irq))
315 desc->handle_irq(irq, desc);
316 else
317 __do_IRQ(irq);
318#endif
319}
320
321static inline void generic_handle_irq(unsigned int irq)
322{
323 generic_handle_irq_desc(irq, irq_to_desc(irq));
324}
325
326/* Handling of unhandled and spurious interrupts: */ 258/* Handling of unhandled and spurious interrupts: */
327extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 259extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
328 irqreturn_t action_ret); 260 irqreturn_t action_ret);
329 261
330/* Resending of interrupts :*/
331void check_irq_resend(struct irq_desc *desc, unsigned int irq);
332 262
333/* Enable/disable irq debugging output: */ 263/* Enable/disable irq debugging output: */
334extern int noirqdebug_setup(char *str); 264extern int noirqdebug_setup(char *str);
@@ -351,16 +281,6 @@ extern void
351__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 281__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
352 const char *name); 282 const char *name);
353 283
354/* caller has locked the irq_desc and both params are valid */
355static inline void __set_irq_handler_unlocked(int irq,
356 irq_flow_handler_t handler)
357{
358 struct irq_desc *desc;
359
360 desc = irq_to_desc(irq);
361 desc->handle_irq = handler;
362}
363
364/* 284/*
365 * Set a highlevel flow handler for a given IRQ: 285 * Set a highlevel flow handler for a given IRQ:
366 */ 286 */
@@ -384,21 +304,33 @@ set_irq_chained_handler(unsigned int irq,
384 304
385extern void set_irq_nested_thread(unsigned int irq, int nest); 305extern void set_irq_nested_thread(unsigned int irq, int nest);
386 306
387extern void set_irq_noprobe(unsigned int irq); 307void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
388extern void set_irq_probe(unsigned int irq); 308
309static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
310{
311 irq_modify_status(irq, 0, set);
312}
313
314static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
315{
316 irq_modify_status(irq, clr, 0);
317}
318
319static inline void set_irq_noprobe(unsigned int irq)
320{
321 irq_modify_status(irq, 0, IRQ_NOPROBE);
322}
323
324static inline void set_irq_probe(unsigned int irq)
325{
326 irq_modify_status(irq, IRQ_NOPROBE, 0);
327}
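
A sketch of how the new status helpers replace open-coded desc->status fiddling in drivers; gpio_irq and the mydemo_* names are placeholders:

#include <linux/irq.h>

static void mydemo_protect_demux_irq(unsigned int gpio_irq)
{
	/* keep autoprobing and request_irq() away from a chained demux line */
	irq_set_status_flags(gpio_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}

static void mydemo_release_demux_irq(unsigned int gpio_irq)
{
	irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
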
389 328
390/* Handle dynamic irq creation and destruction */ 329/* Handle dynamic irq creation and destruction */
391extern unsigned int create_irq_nr(unsigned int irq_want, int node); 330extern unsigned int create_irq_nr(unsigned int irq_want, int node);
392extern int create_irq(void); 331extern int create_irq(void);
393extern void destroy_irq(unsigned int irq); 332extern void destroy_irq(unsigned int irq);
394 333
395/* Test to see if a driver has successfully requested an irq */
396static inline int irq_has_action(unsigned int irq)
397{
398 struct irq_desc *desc = irq_to_desc(irq);
399 return desc->action != NULL;
400}
401
402/* Dynamic irq helper functions */ 334/* Dynamic irq helper functions */
403extern void dynamic_irq_init(unsigned int irq); 335extern void dynamic_irq_init(unsigned int irq);
404void dynamic_irq_init_keep_chip_data(unsigned int irq); 336void dynamic_irq_init_keep_chip_data(unsigned int irq);
@@ -411,114 +343,91 @@ extern int set_irq_data(unsigned int irq, void *data);
411extern int set_irq_chip_data(unsigned int irq, void *data); 343extern int set_irq_chip_data(unsigned int irq, void *data);
412extern int set_irq_type(unsigned int irq, unsigned int type); 344extern int set_irq_type(unsigned int irq, unsigned int type);
413extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); 345extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
346extern struct irq_data *irq_get_irq_data(unsigned int irq);
414 347
415#define get_irq_chip(irq) (irq_to_desc(irq)->chip) 348static inline struct irq_chip *get_irq_chip(unsigned int irq)
416#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
417#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
418#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
419
420#define get_irq_desc_chip(desc) ((desc)->chip)
421#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
422#define get_irq_desc_data(desc) ((desc)->handler_data)
423#define get_irq_desc_msi(desc) ((desc)->msi_desc)
424
425#endif /* CONFIG_GENERIC_HARDIRQS */
426
427#endif /* !CONFIG_S390 */
428
429#ifdef CONFIG_SMP
430/**
431 * alloc_desc_masks - allocate cpumasks for irq_desc
432 * @desc: pointer to irq_desc struct
433 * @node: node which will be handling the cpumasks
434 * @boot: true if need bootmem
435 *
436 * Allocates affinity and pending_mask cpumask if required.
437 * Returns true if successful (or not required).
438 */
439static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
440 bool boot)
441{ 349{
442 gfp_t gfp = GFP_ATOMIC; 350 struct irq_data *d = irq_get_irq_data(irq);
351 return d ? d->chip : NULL;
352}
443 353
444 if (boot) 354static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
445 gfp = GFP_NOWAIT; 355{
356 return d->chip;
357}
446 358
447#ifdef CONFIG_CPUMASK_OFFSTACK 359static inline void *get_irq_chip_data(unsigned int irq)
448 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) 360{
449 return false; 361 struct irq_data *d = irq_get_irq_data(irq);
362 return d ? d->chip_data : NULL;
363}
450 364
451#ifdef CONFIG_GENERIC_PENDING_IRQ 365static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
452 if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 366{
453 free_cpumask_var(desc->affinity); 367 return d->chip_data;
454 return false;
455 }
456#endif
457#endif
458 return true;
459} 368}
460 369
461static inline void init_desc_masks(struct irq_desc *desc) 370static inline void *get_irq_data(unsigned int irq)
462{ 371{
463 cpumask_setall(desc->affinity); 372 struct irq_data *d = irq_get_irq_data(irq);
464#ifdef CONFIG_GENERIC_PENDING_IRQ 373 return d ? d->handler_data : NULL;
465 cpumask_clear(desc->pending_mask);
466#endif
467} 374}
468 375
469/** 376static inline void *irq_data_get_irq_data(struct irq_data *d)
470 * init_copy_desc_masks - copy cpumasks for irq_desc 377{
471 * @old_desc: pointer to old irq_desc struct 378 return d->handler_data;
472 * @new_desc: pointer to new irq_desc struct 379}
473 *
474 * Ensures affinity and pending_masks are copied to new irq_desc.
475 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
476 * irq_desc struct so the copy is redundant.
477 */
478 380
479static inline void init_copy_desc_masks(struct irq_desc *old_desc, 381static inline struct msi_desc *get_irq_msi(unsigned int irq)
480 struct irq_desc *new_desc)
481{ 382{
482#ifdef CONFIG_CPUMASK_OFFSTACK 383 struct irq_data *d = irq_get_irq_data(irq);
483 cpumask_copy(new_desc->affinity, old_desc->affinity); 384 return d ? d->msi_desc : NULL;
385}
484 386
485#ifdef CONFIG_GENERIC_PENDING_IRQ 387static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
486 cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); 388{
487#endif 389 return d->msi_desc;
488#endif
489} 390}
490 391
491static inline void free_desc_masks(struct irq_desc *old_desc, 392#ifdef CONFIG_INTR_REMAP
492 struct irq_desc *new_desc) 393static inline struct irq_2_iommu *get_irq_iommu(unsigned int irq)
493{ 394{
494 free_cpumask_var(old_desc->affinity); 395 struct irq_data *d = irq_get_irq_data(irq);
396 return d ? d->irq_2_iommu : NULL;
397}
495 398
496#ifdef CONFIG_GENERIC_PENDING_IRQ 399static inline struct irq_2_iommu *irq_data_get_iommu(struct irq_data *d)
497 free_cpumask_var(old_desc->pending_mask); 400{
498#endif 401 return d->irq_2_iommu;
499} 402}
403#endif
500 404
501#else /* !CONFIG_SMP */ 405int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
406void irq_free_descs(unsigned int irq, unsigned int cnt);
407int irq_reserve_irqs(unsigned int from, unsigned int cnt);
502 408
503static inline bool alloc_desc_masks(struct irq_desc *desc, int node, 409static inline int irq_alloc_desc(int node)
504 bool boot)
505{ 410{
506 return true; 411 return irq_alloc_descs(-1, 0, 1, node);
507} 412}
508 413
509static inline void init_desc_masks(struct irq_desc *desc) 414static inline int irq_alloc_desc_at(unsigned int at, int node)
510{ 415{
416 return irq_alloc_descs(at, at, 1, node);
511} 417}
512 418
513static inline void init_copy_desc_masks(struct irq_desc *old_desc, 419static inline int irq_alloc_desc_from(unsigned int from, int node)
514 struct irq_desc *new_desc)
515{ 420{
421 return irq_alloc_descs(-1, from, 1, node);
516} 422}
517 423
518static inline void free_desc_masks(struct irq_desc *old_desc, 424static inline void irq_free_desc(unsigned int irq)
519 struct irq_desc *new_desc)
520{ 425{
426 irq_free_descs(irq, 1);
521} 427}
522#endif /* CONFIG_SMP */ 428
429#endif /* CONFIG_GENERIC_HARDIRQS */
430
431#endif /* !CONFIG_S390 */
523 432
524#endif /* _LINUX_IRQ_H */ 433#endif /* _LINUX_IRQ_H */
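
A sketch, with made-up numbers, of the new descriptor allocation helpers as a driver might call them; only irq_alloc_descs(), set_irq_chip_data() and irq_free_descs() from above are assumed:

#include <linux/irq.h>

static int mydemo_setup_irqs(void *chip_data, int node)
{
	int base, i;

	/* ask for 8 consecutive irq numbers, searching upwards from 64 */
	base = irq_alloc_descs(-1, 64, 8, node);
	if (base < 0)
		return base;

	for (i = 0; i < 8; i++)
		set_irq_chip_data(base + i, chip_data);

	return base;			/* caller later does irq_free_descs(base, 8) */
}
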
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
new file mode 100644
index 000000000000..22e426fdd301
--- /dev/null
+++ b/include/linux/irqdesc.h
@@ -0,0 +1,171 @@
1#ifndef _LINUX_IRQDESC_H
2#define _LINUX_IRQDESC_H
3
4/*
5 * Core internal functions to deal with irq descriptors
6 *
7 * This include will move to kernel/irq once we cleaned up the tree.
8 * For now it's included from <linux/irq.h>
9 */
10
11struct proc_dir_entry;
12struct timer_rand_state;
13struct irq_2_iommu;
14/**
15 * struct irq_desc - interrupt descriptor
16 * @irq_data: per irq and chip data passed down to chip functions
17 * @timer_rand_state: pointer to timer rand state struct
18 * @kstat_irqs: irq stats per cpu
19 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
20 * @action: the irq action chain
21 * @status: status information
22 * @depth: disable-depth, for nested irq_disable() calls
23 * @wake_depth: enable depth, for multiple set_irq_wake() callers
24 * @irq_count: stats field to detect stalled irqs
25 * @last_unhandled: aging timer for unhandled count
26 * @irqs_unhandled: stats field for spurious unhandled interrupts
27 * @lock: locking for SMP
28 * @pending_mask: pending rebalanced interrupts
29 * @threads_active: number of irqaction threads currently running
30 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
31 * @dir: /proc/irq/ procfs entry
32 * @name: flow handler name for /proc/interrupts output
33 */
34struct irq_desc {
35
36#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
37 struct irq_data irq_data;
38#else
39 /*
40 * This union will go away once we have fixed the direct access to
41 * irq_desc all over the place. The direct fields are a 1:1
42 * overlay of irq_data.
43 */
44 union {
45 struct irq_data irq_data;
46 struct {
47 unsigned int irq;
48 unsigned int node;
49 struct irq_chip *chip;
50 void *handler_data;
51 void *chip_data;
52 struct msi_desc *msi_desc;
53#ifdef CONFIG_SMP
54 cpumask_var_t affinity;
55#endif
56#ifdef CONFIG_INTR_REMAP
57 struct irq_2_iommu *irq_2_iommu;
58#endif
59 };
60 };
61#endif
62
63 struct timer_rand_state *timer_rand_state;
64 unsigned int *kstat_irqs;
65 irq_flow_handler_t handle_irq;
66 struct irqaction *action; /* IRQ action list */
67 unsigned int status; /* IRQ status */
68
69 unsigned int depth; /* nested irq disables */
70 unsigned int wake_depth; /* nested wake enables */
71 unsigned int irq_count; /* For detecting broken IRQs */
72 unsigned long last_unhandled; /* Aging timer for unhandled count */
73 unsigned int irqs_unhandled;
74 raw_spinlock_t lock;
75#ifdef CONFIG_SMP
76 const struct cpumask *affinity_hint;
77#ifdef CONFIG_GENERIC_PENDING_IRQ
78 cpumask_var_t pending_mask;
79#endif
80#endif
81 atomic_t threads_active;
82 wait_queue_head_t wait_for_threads;
83#ifdef CONFIG_PROC_FS
84 struct proc_dir_entry *dir;
85#endif
86 const char *name;
87} ____cacheline_internodealigned_in_smp;
88
89extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
90 struct irq_desc *desc, int node);
91extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
92
93#ifndef CONFIG_SPARSE_IRQ
94extern struct irq_desc irq_desc[NR_IRQS];
95#endif
96
97#ifdef CONFIG_NUMA_IRQ_DESC
98extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
99#else
100static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
101{
102 return desc;
103}
104#endif
105
106extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
107
108#ifdef CONFIG_GENERIC_HARDIRQS
109
110#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
111#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
112#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
113#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
114
115/*
116 * Monolithic do_IRQ implementation.
117 */
118#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
119extern unsigned int __do_IRQ(unsigned int irq);
120#endif
121
122/*
123 * Architectures call this to let the generic IRQ layer
124 * handle an interrupt. If the descriptor is attached to an
125 * irqchip-style controller then we call the ->handle_irq() handler,
126 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
127 */
128static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
129{
130#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
131 desc->handle_irq(irq, desc);
132#else
133 if (likely(desc->handle_irq))
134 desc->handle_irq(irq, desc);
135 else
136 __do_IRQ(irq);
137#endif
138}
139
140static inline void generic_handle_irq(unsigned int irq)
141{
142 generic_handle_irq_desc(irq, irq_to_desc(irq));
143}
144
145/* Test to see if a driver has successfully requested an irq */
146static inline int irq_has_action(unsigned int irq)
147{
148 struct irq_desc *desc = irq_to_desc(irq);
149 return desc->action != NULL;
150}
151
152static inline int irq_balancing_disabled(unsigned int irq)
153{
154 struct irq_desc *desc;
155
156 desc = irq_to_desc(irq);
157 return desc->status & IRQ_NO_BALANCING_MASK;
158}
159
160/* caller has locked the irq_desc and both params are valid */
161static inline void __set_irq_handler_unlocked(int irq,
162 irq_flow_handler_t handler)
163{
164 struct irq_desc *desc;
165
166 desc = irq_to_desc(irq);
167 desc->handle_irq = handler;
168}
169#endif
170
171#endif
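
A sketch of a chained demultiplexer dispatching child interrupts through generic_handle_irq(); mydemo_pending(), MYDEMO_IRQ_BASE and the 32-bit status register are invented for illustration:

#include <linux/bitops.h>
#include <linux/irq.h>

#define MYDEMO_IRQ_BASE	96			/* made-up child irq base */

static unsigned long mydemo_pending(struct irq_desc *desc)
{
	return 0;				/* hypothetical: read the hw status register */
}

static void mydemo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = mydemo_pending(desc);
	int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(MYDEMO_IRQ_BASE + bit);
}
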
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 7bf89bc8cbca..05aa8c23483f 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -25,6 +25,7 @@
25 25
26extern int nr_irqs; 26extern int nr_irqs;
27extern struct irq_desc *irq_to_desc(unsigned int irq); 27extern struct irq_desc *irq_to_desc(unsigned int irq);
28unsigned int irq_get_next_irq(unsigned int offset);
28 29
29# define for_each_irq_desc(irq, desc) \ 30# define for_each_irq_desc(irq, desc) \
30 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ 31 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
47#define irq_node(irq) 0 48#define irq_node(irq) 0
48#endif 49#endif
49 50
51# define for_each_active_irq(irq) \
52 for (irq = irq_get_next_irq(0); irq < nr_irqs; \
53 irq = irq_get_next_irq(irq + 1))
54
50#endif /* CONFIG_GENERIC_HARDIRQS */ 55#endif /* CONFIG_GENERIC_HARDIRQS */
51 56
52#define for_each_irq_nr(irq) \ 57#define for_each_irq_nr(irq) \
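
A sketch of the new iterator, which visits only irqs that actually have a descriptor allocated; the pr_info() output is just a placeholder action:

#include <linux/irq.h>
#include <linux/kernel.h>

static void mydemo_dump_active(void)
{
	unsigned int irq;

	for_each_active_irq(irq)
		pr_info("irq %u has a descriptor\n", irq);
}
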
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06aed8305bf3..17d050ce7ab8 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -424,14 +424,6 @@ do { \
424 424
425#endif /* CONFIG_LOCKDEP */ 425#endif /* CONFIG_LOCKDEP */
426 426
427#ifdef CONFIG_GENERIC_HARDIRQS
428extern void early_init_irq_lock_class(void);
429#else
430static inline void early_init_irq_lock_class(void)
431{
432}
433#endif
434
435#ifdef CONFIG_TRACE_IRQFLAGS 427#ifdef CONFIG_TRACE_IRQFLAGS
436extern void early_boot_irqs_off(void); 428extern void early_boot_irqs_off(void);
437extern void early_boot_irqs_on(void); 429extern void early_boot_irqs_on(void);
diff --git a/init/Kconfig b/init/Kconfig
index 2de5b1cbadd9..1df1a87cc595 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -332,6 +332,8 @@ config AUDIT_TREE
332 depends on AUDITSYSCALL 332 depends on AUDITSYSCALL
333 select FSNOTIFY 333 select FSNOTIFY
334 334
335source "kernel/irq/Kconfig"
336
335menu "RCU Subsystem" 337menu "RCU Subsystem"
336 338
337choice 339choice
diff --git a/init/main.c b/init/main.c
index 94ab488039aa..9684c9670b48 100644
--- a/init/main.c
+++ b/init/main.c
@@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void)
556 556
557 local_irq_disable(); 557 local_irq_disable();
558 early_boot_irqs_off(); 558 early_boot_irqs_off();
559 early_init_irq_lock_class();
560 559
561/* 560/*
562 * Interrupts are still disabled. Do necessary setups, then 561 * Interrupts are still disabled. Do necessary setups, then
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
new file mode 100644
index 000000000000..a42c0191d71a
--- /dev/null
+++ b/kernel/irq/Kconfig
@@ -0,0 +1,58 @@
1config HAVE_GENERIC_HARDIRQS
2 def_bool n
3
4if HAVE_GENERIC_HARDIRQS
5menu "IRQ subsystem"
6#
7# Interrupt subsystem related configuration options
8#
9config GENERIC_HARDIRQS
10 def_bool y
11
12config GENERIC_HARDIRQS_NO__DO_IRQ
13 def_bool y
14
15# Select this to disable the deprecated stuff
16config GENERIC_HARDIRQS_NO_DEPRECATED
17 def_bool n
18
19# Options selectable by the architecture code
20config HAVE_SPARSE_IRQ
21 def_bool n
22
23config GENERIC_IRQ_PROBE
24 def_bool n
25
26config GENERIC_PENDING_IRQ
27 def_bool n
28
29if SPARSE_IRQ && NUMA
30config NUMA_IRQ_DESC
31 def_bool n
32endif
33
34config AUTO_IRQ_AFFINITY
35 def_bool n
36
37config IRQ_PER_CPU
38 def_bool n
39
40config HARDIRQS_SW_RESEND
41 def_bool n
42
43config SPARSE_IRQ
44 bool "Support sparse irq numbering"
45 depends on HAVE_SPARSE_IRQ
46 ---help---
47
48 Sparse irq numbering is useful for distro kernels that want
49 to define a high CONFIG_NR_CPUS value but still want to have
50 low kernel memory footprint on smaller machines.
51
52 ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
53 out the interrupt descriptors in a more NUMA-friendly way. )
54
55 If you don't know what to do here, say N.
56
57endmenu
58endif
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 7d047808419d..1eaab0da56db 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,5 @@
1 1
2obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o 2obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 4obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 2295a31ef110..505798f86c36 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -57,9 +57,10 @@ unsigned long probe_irq_on(void)
57 * Some chips need to know about probing in 57 * Some chips need to know about probing in
58 * progress: 58 * progress:
59 */ 59 */
60 if (desc->chip->set_type) 60 if (desc->irq_data.chip->irq_set_type)
61 desc->chip->set_type(i, IRQ_TYPE_PROBE); 61 desc->irq_data.chip->irq_set_type(&desc->irq_data,
62 desc->chip->startup(i); 62 IRQ_TYPE_PROBE);
63 desc->irq_data.chip->irq_startup(&desc->irq_data);
63 } 64 }
64 raw_spin_unlock_irq(&desc->lock); 65 raw_spin_unlock_irq(&desc->lock);
65 } 66 }
@@ -76,7 +77,7 @@ unsigned long probe_irq_on(void)
76 raw_spin_lock_irq(&desc->lock); 77 raw_spin_lock_irq(&desc->lock);
77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 78 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 79 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
79 if (desc->chip->startup(i)) 80 if (desc->irq_data.chip->irq_startup(&desc->irq_data))
80 desc->status |= IRQ_PENDING; 81 desc->status |= IRQ_PENDING;
81 } 82 }
82 raw_spin_unlock_irq(&desc->lock); 83 raw_spin_unlock_irq(&desc->lock);
@@ -98,7 +99,7 @@ unsigned long probe_irq_on(void)
98 /* It triggered already - consider it spurious. */ 99 /* It triggered already - consider it spurious. */
99 if (!(status & IRQ_WAITING)) { 100 if (!(status & IRQ_WAITING)) {
100 desc->status = status & ~IRQ_AUTODETECT; 101 desc->status = status & ~IRQ_AUTODETECT;
101 desc->chip->shutdown(i); 102 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
102 } else 103 } else
103 if (i < 32) 104 if (i < 32)
104 mask |= 1 << i; 105 mask |= 1 << i;
@@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val)
137 mask |= 1 << i; 138 mask |= 1 << i;
138 139
139 desc->status = status & ~IRQ_AUTODETECT; 140 desc->status = status & ~IRQ_AUTODETECT;
140 desc->chip->shutdown(i); 141 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
141 } 142 }
142 raw_spin_unlock_irq(&desc->lock); 143 raw_spin_unlock_irq(&desc->lock);
143 } 144 }
@@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val)
181 nr_of_irqs++; 182 nr_of_irqs++;
182 } 183 }
183 desc->status = status & ~IRQ_AUTODETECT; 184 desc->status = status & ~IRQ_AUTODETECT;
184 desc->chip->shutdown(i); 185 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
185 } 186 }
186 raw_spin_unlock_irq(&desc->lock); 187 raw_spin_unlock_irq(&desc->lock);
187 } 188 }
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b7091d5ca2f8..3405761d6224 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -31,19 +31,19 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
31 31
32 /* Ensure we don't have left over values from a previous use of this irq */ 32 /* Ensure we don't have left over values from a previous use of this irq */
33 raw_spin_lock_irqsave(&desc->lock, flags); 33 raw_spin_lock_irqsave(&desc->lock, flags);
34 desc->status = IRQ_DISABLED; 34 desc->status = IRQ_DEFAULT_INIT_FLAGS;
35 desc->chip = &no_irq_chip; 35 desc->irq_data.chip = &no_irq_chip;
36 desc->handle_irq = handle_bad_irq; 36 desc->handle_irq = handle_bad_irq;
37 desc->depth = 1; 37 desc->depth = 1;
38 desc->msi_desc = NULL; 38 desc->irq_data.msi_desc = NULL;
39 desc->handler_data = NULL; 39 desc->irq_data.handler_data = NULL;
40 if (!keep_chip_data) 40 if (!keep_chip_data)
41 desc->chip_data = NULL; 41 desc->irq_data.chip_data = NULL;
42 desc->action = NULL; 42 desc->action = NULL;
43 desc->irq_count = 0; 43 desc->irq_count = 0;
44 desc->irqs_unhandled = 0; 44 desc->irqs_unhandled = 0;
45#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
46 cpumask_setall(desc->affinity); 46 cpumask_setall(desc->irq_data.affinity);
47#ifdef CONFIG_GENERIC_PENDING_IRQ 47#ifdef CONFIG_GENERIC_PENDING_IRQ
48 cpumask_clear(desc->pending_mask); 48 cpumask_clear(desc->pending_mask);
49#endif 49#endif
@@ -64,7 +64,7 @@ void dynamic_irq_init(unsigned int irq)
64 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq 64 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65 * @irq: irq number to initialize 65 * @irq: irq number to initialize
66 * 66 *
67 * does not set irq_to_desc(irq)->chip_data to NULL 67 * does not set irq_to_desc(irq)->irq_data.chip_data to NULL
68 */ 68 */
69void dynamic_irq_init_keep_chip_data(unsigned int irq) 69void dynamic_irq_init_keep_chip_data(unsigned int irq)
70{ 70{
@@ -88,12 +88,12 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
88 irq); 88 irq);
89 return; 89 return;
90 } 90 }
91 desc->msi_desc = NULL; 91 desc->irq_data.msi_desc = NULL;
92 desc->handler_data = NULL; 92 desc->irq_data.handler_data = NULL;
93 if (!keep_chip_data) 93 if (!keep_chip_data)
94 desc->chip_data = NULL; 94 desc->irq_data.chip_data = NULL;
95 desc->handle_irq = handle_bad_irq; 95 desc->handle_irq = handle_bad_irq;
96 desc->chip = &no_irq_chip; 96 desc->irq_data.chip = &no_irq_chip;
97 desc->name = NULL; 97 desc->name = NULL;
98 clear_kstat_irqs(desc); 98 clear_kstat_irqs(desc);
99 raw_spin_unlock_irqrestore(&desc->lock, flags); 99 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -112,7 +112,7 @@ void dynamic_irq_cleanup(unsigned int irq)
112 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq 112 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113 * @irq: irq number to initialize 113 * @irq: irq number to initialize
114 * 114 *
115 * does not set irq_to_desc(irq)->chip_data to NULL 115 * does not set irq_to_desc(irq)->irq_data.chip_data to NULL
116 */ 116 */
117void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) 117void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118{ 118{
@@ -140,7 +140,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
140 140
141 raw_spin_lock_irqsave(&desc->lock, flags); 141 raw_spin_lock_irqsave(&desc->lock, flags);
142 irq_chip_set_defaults(chip); 142 irq_chip_set_defaults(chip);
143 desc->chip = chip; 143 desc->irq_data.chip = chip;
144 raw_spin_unlock_irqrestore(&desc->lock, flags); 144 raw_spin_unlock_irqrestore(&desc->lock, flags);
145 145
146 return 0; 146 return 0;
@@ -193,7 +193,7 @@ int set_irq_data(unsigned int irq, void *data)
193 } 193 }
194 194
195 raw_spin_lock_irqsave(&desc->lock, flags); 195 raw_spin_lock_irqsave(&desc->lock, flags);
196 desc->handler_data = data; 196 desc->irq_data.handler_data = data;
197 raw_spin_unlock_irqrestore(&desc->lock, flags); 197 raw_spin_unlock_irqrestore(&desc->lock, flags);
198 return 0; 198 return 0;
199} 199}
@@ -218,7 +218,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
218 } 218 }
219 219
220 raw_spin_lock_irqsave(&desc->lock, flags); 220 raw_spin_lock_irqsave(&desc->lock, flags);
221 desc->msi_desc = entry; 221 desc->irq_data.msi_desc = entry;
222 if (entry) 222 if (entry)
223 entry->irq = irq; 223 entry->irq = irq;
224 raw_spin_unlock_irqrestore(&desc->lock, flags); 224 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -243,19 +243,27 @@ int set_irq_chip_data(unsigned int irq, void *data)
243 return -EINVAL; 243 return -EINVAL;
244 } 244 }
245 245
246 if (!desc->chip) { 246 if (!desc->irq_data.chip) {
247 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 247 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
248 return -EINVAL; 248 return -EINVAL;
249 } 249 }
250 250
251 raw_spin_lock_irqsave(&desc->lock, flags); 251 raw_spin_lock_irqsave(&desc->lock, flags);
252 desc->chip_data = data; 252 desc->irq_data.chip_data = data;
253 raw_spin_unlock_irqrestore(&desc->lock, flags); 253 raw_spin_unlock_irqrestore(&desc->lock, flags);
254 254
255 return 0; 255 return 0;
256} 256}
257EXPORT_SYMBOL(set_irq_chip_data); 257EXPORT_SYMBOL(set_irq_chip_data);
258 258
259struct irq_data *irq_get_irq_data(unsigned int irq)
260{
261 struct irq_desc *desc = irq_to_desc(irq);
262
263 return desc ? &desc->irq_data : NULL;
264}
265EXPORT_SYMBOL_GPL(irq_get_irq_data);
266
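
A sketch of how driver code can now reach its chip data through irq_get_irq_data() instead of dereferencing irq_desc; struct mydemo_state is a made-up type, assumed to have been installed with set_irq_chip_data():

struct mydemo_state;			/* hypothetical driver-private type */

static struct mydemo_state *mydemo_state_from_irq(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irq_data_get_irq_chip_data(d) : NULL;
}
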
259/** 267/**
260 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq 268 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
261 * 269 *
@@ -287,93 +295,216 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread);
287/* 295/*
288 * default enable function 296 * default enable function
289 */ 297 */
290static void default_enable(unsigned int irq) 298static void default_enable(struct irq_data *data)
291{ 299{
292 struct irq_desc *desc = irq_to_desc(irq); 300 struct irq_desc *desc = irq_data_to_desc(data);
293 301
294 desc->chip->unmask(irq); 302 desc->irq_data.chip->irq_unmask(&desc->irq_data);
295 desc->status &= ~IRQ_MASKED; 303 desc->status &= ~IRQ_MASKED;
296} 304}
297 305
298/* 306/*
299 * default disable function 307 * default disable function
300 */ 308 */
301static void default_disable(unsigned int irq) 309static void default_disable(struct irq_data *data)
302{ 310{
303} 311}
304 312
305/* 313/*
306 * default startup function 314 * default startup function
307 */ 315 */
308static unsigned int default_startup(unsigned int irq) 316static unsigned int default_startup(struct irq_data *data)
309{ 317{
310 struct irq_desc *desc = irq_to_desc(irq); 318 struct irq_desc *desc = irq_data_to_desc(data);
311 319
312 desc->chip->enable(irq); 320 desc->irq_data.chip->irq_enable(data);
313 return 0; 321 return 0;
314} 322}
315 323
316/* 324/*
317 * default shutdown function 325 * default shutdown function
318 */ 326 */
319static void default_shutdown(unsigned int irq) 327static void default_shutdown(struct irq_data *data)
320{ 328{
321 struct irq_desc *desc = irq_to_desc(irq); 329 struct irq_desc *desc = irq_data_to_desc(data);
322 330
323 desc->chip->mask(irq); 331 desc->irq_data.chip->irq_mask(&desc->irq_data);
324 desc->status |= IRQ_MASKED; 332 desc->status |= IRQ_MASKED;
325} 333}
326 334
335#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
336/* Temporary migration helpers */
337static void compat_irq_mask(struct irq_data *data)
338{
339 data->chip->mask(data->irq);
340}
341
342static void compat_irq_unmask(struct irq_data *data)
343{
344 data->chip->unmask(data->irq);
345}
346
347static void compat_irq_ack(struct irq_data *data)
348{
349 data->chip->ack(data->irq);
350}
351
352static void compat_irq_mask_ack(struct irq_data *data)
353{
354 data->chip->mask_ack(data->irq);
355}
356
357static void compat_irq_eoi(struct irq_data *data)
358{
359 data->chip->eoi(data->irq);
360}
361
362static void compat_irq_enable(struct irq_data *data)
363{
364 data->chip->enable(data->irq);
365}
366
367static void compat_irq_disable(struct irq_data *data)
368{
369 data->chip->disable(data->irq);
370}
371
372static void compat_irq_shutdown(struct irq_data *data)
373{
374 data->chip->shutdown(data->irq);
375}
376
377static unsigned int compat_irq_startup(struct irq_data *data)
378{
379 return data->chip->startup(data->irq);
380}
381
382static int compat_irq_set_affinity(struct irq_data *data,
383 const struct cpumask *dest, bool force)
384{
385 return data->chip->set_affinity(data->irq, dest);
386}
387
388static int compat_irq_set_type(struct irq_data *data, unsigned int type)
389{
390 return data->chip->set_type(data->irq, type);
391}
392
393static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
394{
395 return data->chip->set_wake(data->irq, on);
396}
397
398static int compat_irq_retrigger(struct irq_data *data)
399{
400 return data->chip->retrigger(data->irq);
401}
402
403static void compat_bus_lock(struct irq_data *data)
404{
405 data->chip->bus_lock(data->irq);
406}
407
408static void compat_bus_sync_unlock(struct irq_data *data)
409{
410 data->chip->bus_sync_unlock(data->irq);
411}
412#endif
413
327/* 414/*
328 * Fixup enable/disable function pointers 415 * Fixup enable/disable function pointers
329 */ 416 */
330void irq_chip_set_defaults(struct irq_chip *chip) 417void irq_chip_set_defaults(struct irq_chip *chip)
331{ 418{
332 if (!chip->enable) 419#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
333 chip->enable = default_enable; 420 /*
334 if (!chip->disable) 421 * Compat fixup functions need to be before we set the
335 chip->disable = default_disable; 422 * defaults for enable/disable/startup/shutdown
336 if (!chip->startup) 423 */
337 chip->startup = default_startup; 424 if (chip->enable)
425 chip->irq_enable = compat_irq_enable;
426 if (chip->disable)
427 chip->irq_disable = compat_irq_disable;
428 if (chip->shutdown)
429 chip->irq_shutdown = compat_irq_shutdown;
430 if (chip->startup)
431 chip->irq_startup = compat_irq_startup;
432#endif
433 /*
434 * The real defaults
435 */
436 if (!chip->irq_enable)
437 chip->irq_enable = default_enable;
438 if (!chip->irq_disable)
439 chip->irq_disable = default_disable;
440 if (!chip->irq_startup)
441 chip->irq_startup = default_startup;
338 /* 442 /*
339 * We use chip->disable, when the user provided its own. When 443 * We use chip->irq_disable, when the user provided its own. When
340 * we have default_disable set for chip->disable, then we need 444 * we have default_disable set for chip->irq_disable, then we need
341 * to use default_shutdown, otherwise the irq line is not 445 * to use default_shutdown, otherwise the irq line is not
342 * disabled on free_irq(): 446 * disabled on free_irq():
343 */ 447 */
344 if (!chip->shutdown) 448 if (!chip->irq_shutdown)
345 chip->shutdown = chip->disable != default_disable ? 449 chip->irq_shutdown = chip->irq_disable != default_disable ?
346 chip->disable : default_shutdown; 450 chip->irq_disable : default_shutdown;
347 if (!chip->name) 451
348 chip->name = chip->typename; 452#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
349 if (!chip->end) 453 if (!chip->end)
350 chip->end = dummy_irq_chip.end; 454 chip->end = dummy_irq_chip.end;
455
456 /*
457 * Now fix up the remaining compat handlers
458 */
459 if (chip->bus_lock)
460 chip->irq_bus_lock = compat_bus_lock;
461 if (chip->bus_sync_unlock)
462 chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
463 if (chip->mask)
464 chip->irq_mask = compat_irq_mask;
465 if (chip->unmask)
466 chip->irq_unmask = compat_irq_unmask;
467 if (chip->ack)
468 chip->irq_ack = compat_irq_ack;
469 if (chip->mask_ack)
470 chip->irq_mask_ack = compat_irq_mask_ack;
471 if (chip->eoi)
472 chip->irq_eoi = compat_irq_eoi;
473 if (chip->set_affinity)
474 chip->irq_set_affinity = compat_irq_set_affinity;
475 if (chip->set_type)
476 chip->irq_set_type = compat_irq_set_type;
477 if (chip->set_wake)
478 chip->irq_set_wake = compat_irq_set_wake;
479 if (chip->retrigger)
480 chip->irq_retrigger = compat_irq_retrigger;
481#endif
351} 482}
352 483
353static inline void mask_ack_irq(struct irq_desc *desc, int irq) 484static inline void mask_ack_irq(struct irq_desc *desc)
354{ 485{
355 if (desc->chip->mask_ack) 486 if (desc->irq_data.chip->irq_mask_ack)
356 desc->chip->mask_ack(irq); 487 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
357 else { 488 else {
358 desc->chip->mask(irq); 489 desc->irq_data.chip->irq_mask(&desc->irq_data);
359 if (desc->chip->ack) 490 if (desc->irq_data.chip->irq_ack)
360 desc->chip->ack(irq); 491 desc->irq_data.chip->irq_ack(&desc->irq_data);
361 } 492 }
362 desc->status |= IRQ_MASKED; 493 desc->status |= IRQ_MASKED;
363} 494}
364 495
365static inline void mask_irq(struct irq_desc *desc, int irq) 496static inline void mask_irq(struct irq_desc *desc)
366{ 497{
367 if (desc->chip->mask) { 498 if (desc->irq_data.chip->irq_mask) {
368 desc->chip->mask(irq); 499 desc->irq_data.chip->irq_mask(&desc->irq_data);
369 desc->status |= IRQ_MASKED; 500 desc->status |= IRQ_MASKED;
370 } 501 }
371} 502}
372 503
373static inline void unmask_irq(struct irq_desc *desc, int irq) 504static inline void unmask_irq(struct irq_desc *desc)
374{ 505{
375 if (desc->chip->unmask) { 506 if (desc->irq_data.chip->irq_unmask) {
376 desc->chip->unmask(irq); 507 desc->irq_data.chip->irq_unmask(&desc->irq_data);
377 desc->status &= ~IRQ_MASKED; 508 desc->status &= ~IRQ_MASKED;
378 } 509 }
379} 510}
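
To illustrate the compat path only (names made up): an unconverted chip that still fills in the old unsigned-int callbacks keeps working, because irq_chip_set_defaults() above routes the core's irq_* calls through the compat_* wrappers as long as CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set:

static void oldchip_ack(unsigned int irq)	{ /* clear the hw latch */ }
static void oldchip_mask(unsigned int irq)	{ /* set the hw mask bit */ }
static void oldchip_unmask(unsigned int irq)	{ /* clear the hw mask bit */ }

static struct irq_chip oldchip = {
	.name	= "oldchip",
	.ack	= oldchip_ack,
	.mask	= oldchip_mask,
	.unmask	= oldchip_unmask,
};
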
@@ -476,7 +607,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
476 irqreturn_t action_ret; 607 irqreturn_t action_ret;
477 608
478 raw_spin_lock(&desc->lock); 609 raw_spin_lock(&desc->lock);
479 mask_ack_irq(desc, irq); 610 mask_ack_irq(desc);
480 611
481 if (unlikely(desc->status & IRQ_INPROGRESS)) 612 if (unlikely(desc->status & IRQ_INPROGRESS))
482 goto out_unlock; 613 goto out_unlock;
@@ -502,7 +633,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
502 desc->status &= ~IRQ_INPROGRESS; 633 desc->status &= ~IRQ_INPROGRESS;
503 634
504 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 635 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
505 unmask_irq(desc, irq); 636 unmask_irq(desc);
506out_unlock: 637out_unlock:
507 raw_spin_unlock(&desc->lock); 638 raw_spin_unlock(&desc->lock);
508} 639}
@@ -539,7 +670,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
539 action = desc->action; 670 action = desc->action;
540 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 671 if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
541 desc->status |= IRQ_PENDING; 672 desc->status |= IRQ_PENDING;
542 mask_irq(desc, irq); 673 mask_irq(desc);
543 goto out; 674 goto out;
544 } 675 }
545 676
@@ -554,7 +685,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
554 raw_spin_lock(&desc->lock); 685 raw_spin_lock(&desc->lock);
555 desc->status &= ~IRQ_INPROGRESS; 686 desc->status &= ~IRQ_INPROGRESS;
556out: 687out:
557 desc->chip->eoi(irq); 688 desc->irq_data.chip->irq_eoi(&desc->irq_data);
558 689
559 raw_spin_unlock(&desc->lock); 690 raw_spin_unlock(&desc->lock);
560} 691}
@@ -590,14 +721,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
590 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 721 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
591 !desc->action)) { 722 !desc->action)) {
592 desc->status |= (IRQ_PENDING | IRQ_MASKED); 723 desc->status |= (IRQ_PENDING | IRQ_MASKED);
593 mask_ack_irq(desc, irq); 724 mask_ack_irq(desc);
594 goto out_unlock; 725 goto out_unlock;
595 } 726 }
596 kstat_incr_irqs_this_cpu(irq, desc); 727 kstat_incr_irqs_this_cpu(irq, desc);
597 728
598 /* Start handling the irq */ 729 /* Start handling the irq */
599 if (desc->chip->ack) 730 desc->irq_data.chip->irq_ack(&desc->irq_data);
600 desc->chip->ack(irq);
601 731
602 /* Mark the IRQ currently in progress.*/ 732 /* Mark the IRQ currently in progress.*/
603 desc->status |= IRQ_INPROGRESS; 733 desc->status |= IRQ_INPROGRESS;
@@ -607,7 +737,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
607 irqreturn_t action_ret; 737 irqreturn_t action_ret;
608 738
609 if (unlikely(!action)) { 739 if (unlikely(!action)) {
610 mask_irq(desc, irq); 740 mask_irq(desc);
611 goto out_unlock; 741 goto out_unlock;
612 } 742 }
613 743
@@ -619,7 +749,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
619 if (unlikely((desc->status & 749 if (unlikely((desc->status &
620 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 750 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
621 (IRQ_PENDING | IRQ_MASKED))) { 751 (IRQ_PENDING | IRQ_MASKED))) {
622 unmask_irq(desc, irq); 752 unmask_irq(desc);
623 } 753 }
624 754
625 desc->status &= ~IRQ_PENDING; 755 desc->status &= ~IRQ_PENDING;
@@ -650,15 +780,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
650 780
651 kstat_incr_irqs_this_cpu(irq, desc); 781 kstat_incr_irqs_this_cpu(irq, desc);
652 782
653 if (desc->chip->ack) 783 if (desc->irq_data.chip->irq_ack)
654 desc->chip->ack(irq); 784 desc->irq_data.chip->irq_ack(&desc->irq_data);
655 785
656 action_ret = handle_IRQ_event(irq, desc->action); 786 action_ret = handle_IRQ_event(irq, desc->action);
657 if (!noirqdebug) 787 if (!noirqdebug)
658 note_interrupt(irq, desc, action_ret); 788 note_interrupt(irq, desc, action_ret);
659 789
660 if (desc->chip->eoi) 790 if (desc->irq_data.chip->irq_eoi)
661 desc->chip->eoi(irq); 791 desc->irq_data.chip->irq_eoi(&desc->irq_data);
662} 792}
663 793
664void 794void
@@ -676,7 +806,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
676 806
677 if (!handle) 807 if (!handle)
678 handle = handle_bad_irq; 808 handle = handle_bad_irq;
679 else if (desc->chip == &no_irq_chip) { 809 else if (desc->irq_data.chip == &no_irq_chip) {
680 printk(KERN_WARNING "Trying to install %sinterrupt handler " 810 printk(KERN_WARNING "Trying to install %sinterrupt handler "
681 "for IRQ%d\n", is_chained ? "chained " : "", irq); 811 "for IRQ%d\n", is_chained ? "chained " : "", irq);
682 /* 812 /*
@@ -686,16 +816,16 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
686 * prevent us from setting up the interrupt at all. Switch it to 816 * prevent us from setting up the interrupt at all. Switch it to
687 * dummy_irq_chip for easy transition. 817 * dummy_irq_chip for easy transition.
688 */ 818 */
689 desc->chip = &dummy_irq_chip; 819 desc->irq_data.chip = &dummy_irq_chip;
690 } 820 }
691 821
692 chip_bus_lock(irq, desc); 822 chip_bus_lock(desc);
693 raw_spin_lock_irqsave(&desc->lock, flags); 823 raw_spin_lock_irqsave(&desc->lock, flags);
694 824
695 /* Uninstall? */ 825 /* Uninstall? */
696 if (handle == handle_bad_irq) { 826 if (handle == handle_bad_irq) {
697 if (desc->chip != &no_irq_chip) 827 if (desc->irq_data.chip != &no_irq_chip)
698 mask_ack_irq(desc, irq); 828 mask_ack_irq(desc);
699 desc->status |= IRQ_DISABLED; 829 desc->status |= IRQ_DISABLED;
700 desc->depth = 1; 830 desc->depth = 1;
701 } 831 }
@@ -706,10 +836,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
706 desc->status &= ~IRQ_DISABLED; 836 desc->status &= ~IRQ_DISABLED;
707 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 837 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
708 desc->depth = 0; 838 desc->depth = 0;
709 desc->chip->startup(irq); 839 desc->irq_data.chip->irq_startup(&desc->irq_data);
710 } 840 }
711 raw_spin_unlock_irqrestore(&desc->lock, flags); 841 raw_spin_unlock_irqrestore(&desc->lock, flags);
712 chip_bus_sync_unlock(irq, desc); 842 chip_bus_sync_unlock(desc);
713} 843}
714EXPORT_SYMBOL_GPL(__set_irq_handler); 844EXPORT_SYMBOL_GPL(__set_irq_handler);
715 845
@@ -729,32 +859,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
729 __set_irq_handler(irq, handle, 0, name); 859 __set_irq_handler(irq, handle, 0, name);
730} 860}
731 861
732void set_irq_noprobe(unsigned int irq) 862void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
733{ 863{
734 struct irq_desc *desc = irq_to_desc(irq); 864 struct irq_desc *desc = irq_to_desc(irq);
735 unsigned long flags; 865 unsigned long flags;
736 866
737 if (!desc) { 867 if (!desc)
738 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
739 return; 868 return;
740 }
741 869
742 raw_spin_lock_irqsave(&desc->lock, flags); 870 /* Sanitize flags */
743 desc->status |= IRQ_NOPROBE; 871 set &= IRQF_MODIFY_MASK;
744 raw_spin_unlock_irqrestore(&desc->lock, flags); 872 clr &= IRQF_MODIFY_MASK;
745}
746
747void set_irq_probe(unsigned int irq)
748{
749 struct irq_desc *desc = irq_to_desc(irq);
750 unsigned long flags;
751
752 if (!desc) {
753 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
754 return;
755 }
756 873
757 raw_spin_lock_irqsave(&desc->lock, flags); 874 raw_spin_lock_irqsave(&desc->lock, flags);
758 desc->status &= ~IRQ_NOPROBE; 875 desc->status &= ~clr;
876 desc->status |= set;
759 raw_spin_unlock_irqrestore(&desc->lock, flags); 877 raw_spin_unlock_irqrestore(&desc->lock, flags);
760} 878}
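
Tying the pieces of this file together, a hedged bring-up sketch for one line; the irq number, chip and private data are whatever the (hypothetical) caller provides:

#include <linux/irq.h>

static void mydemo_init_one_irq(unsigned int irq, struct irq_chip *chip, void *priv)
{
	set_irq_chip_data(irq, priv);
	set_irq_chip_and_handler_name(irq, chip, handle_level_irq, "level");
	/* requestable by drivers, but keep it out of autoprobing */
	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
}
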
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
new file mode 100644
index 000000000000..918dea9de9ea
--- /dev/null
+++ b/kernel/irq/dummychip.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
3 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
4 *
5 * This file contains the dummy interrupt chip implementation
6 */
7#include <linux/interrupt.h>
8#include <linux/irq.h>
9
10#include "internals.h"
11
12/*
13 * What should we do if we get a hw irq event on an illegal vector?
14 * Each architecture has to answer this itself.
15 */
16static void ack_bad(struct irq_data *data)
17{
18 struct irq_desc *desc = irq_data_to_desc(data);
19
20 print_irq_desc(data->irq, desc);
21 ack_bad_irq(data->irq);
22}
23
24/*
25 * NOP functions
26 */
27static void noop(struct irq_data *data) { }
28
29static unsigned int noop_ret(struct irq_data *data)
30{
31 return 0;
32}
33
34#ifndef CONFIG_GENERIC_HARDIRQS_NO_CRUFT
35static void compat_noop(unsigned int irq) { }
36#define END_INIT .end = compat_noop
37#else
38#define END_INIT
39#endif
40
41/*
42 * Generic no controller implementation
43 */
44struct irq_chip no_irq_chip = {
45 .name = "none",
46 .irq_startup = noop_ret,
47 .irq_shutdown = noop,
48 .irq_enable = noop,
49 .irq_disable = noop,
50 .irq_ack = ack_bad,
51 END_INIT
52};
53
54/*
55 * Generic dummy implementation which can be used for
56 * real dumb interrupt sources
57 */
58struct irq_chip dummy_irq_chip = {
59 .name = "dummy",
60 .irq_startup = noop_ret,
61 .irq_shutdown = noop,
62 .irq_enable = noop,
63 .irq_disable = noop,
64 .irq_ack = noop,
65 .irq_mask = noop,
66 .irq_unmask = noop,
67 END_INIT
68};
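
A sketch of the typical consumer: a demultiplexed sub-interrupt with no real controller behind it can be backed by dummy_irq_chip, so mask/unmask/ack become harmless no-ops (virq and the flow handler choice are illustrative):

#include <linux/irq.h>

static void mydemo_map_subirq(unsigned int virq)
{
	set_irq_chip_and_handler_name(virq, &dummy_irq_chip,
				      handle_simple_irq, "demux");
}
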
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 27e5c6911223..e2347eb63306 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,24 +11,15 @@
11 */ 11 */
12 12
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/random.h> 14#include <linux/random.h>
15#include <linux/sched.h>
18#include <linux/interrupt.h> 16#include <linux/interrupt.h>
19#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
20#include <linux/rculist.h> 18
21#include <linux/hash.h>
22#include <linux/radix-tree.h>
23#include <trace/events/irq.h> 19#include <trace/events/irq.h>
24 20
25#include "internals.h" 21#include "internals.h"
26 22
27/*
28 * lockdep: we want to handle all irq_desc locks as a single lock-class:
29 */
30struct lock_class_key irq_desc_lock_class;
31
32/** 23/**
33 * handle_bad_irq - handle spurious and unhandled irqs 24 * handle_bad_irq - handle spurious and unhandled irqs
34 * @irq: the interrupt number 25 * @irq: the interrupt number
@@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
43 ack_bad_irq(irq); 34 ack_bad_irq(irq);
44} 35}
45 36
46#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
47static void __init init_irq_default_affinity(void)
48{
49 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
50 cpumask_setall(irq_default_affinity);
51}
52#else
53static void __init init_irq_default_affinity(void)
54{
55}
56#endif
57
58/*
59 * Linux has a controller-independent interrupt architecture.
60 * Every controller has a 'controller-template', that is used
61 * by the main code to do the right thing. Each driver-visible
62 * interrupt source is transparently wired to the appropriate
63 * controller. Thus drivers need not be aware of the
64 * interrupt-controller.
65 *
66 * The code is designed to be easily extended with new/different
67 * interrupt controllers, without having to do assembly magic or
68 * having to touch the generic code.
69 *
70 * Controller mappings for all interrupt sources:
71 */
72int nr_irqs = NR_IRQS;
73EXPORT_SYMBOL_GPL(nr_irqs);
74
75#ifdef CONFIG_SPARSE_IRQ
76
77static struct irq_desc irq_desc_init = {
78 .irq = -1,
79 .status = IRQ_DISABLED,
80 .chip = &no_irq_chip,
81 .handle_irq = handle_bad_irq,
82 .depth = 1,
83 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
84};
85
86void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
87{
88 void *ptr;
89
90 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
91 GFP_ATOMIC, node);
92
93 /*
94 * don't overwrite if we cannot get a new one
95 * init_copy_kstat_irqs() could still use old one
96 */
97 if (ptr) {
98 printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
99 desc->kstat_irqs = ptr;
100 }
101}
102
103static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
104{
105 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
106
107 raw_spin_lock_init(&desc->lock);
108 desc->irq = irq;
109#ifdef CONFIG_SMP
110 desc->node = node;
111#endif
112 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
113 init_kstat_irqs(desc, node, nr_cpu_ids);
114 if (!desc->kstat_irqs) {
115 printk(KERN_ERR "can not alloc kstat_irqs\n");
116 BUG_ON(1);
117 }
118 if (!alloc_desc_masks(desc, node, false)) {
119 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
120 BUG_ON(1);
121 }
122 init_desc_masks(desc);
123 arch_init_chip_data(desc, node);
124}
125
126/*
127 * Protect the sparse_irqs:
128 */
129DEFINE_RAW_SPINLOCK(sparse_irq_lock);
130
131static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
132
133static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
134{
135 radix_tree_insert(&irq_desc_tree, irq, desc);
136}
137
138struct irq_desc *irq_to_desc(unsigned int irq)
139{
140 return radix_tree_lookup(&irq_desc_tree, irq);
141}
142
143void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
144{
145 void **ptr;
146
147 ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
148 if (ptr)
149 radix_tree_replace_slot(ptr, desc);
150}
151
152static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
153 [0 ... NR_IRQS_LEGACY-1] = {
154 .irq = -1,
155 .status = IRQ_DISABLED,
156 .chip = &no_irq_chip,
157 .handle_irq = handle_bad_irq,
158 .depth = 1,
159 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
160 }
161};
162
163static unsigned int *kstat_irqs_legacy;
164
165int __init early_irq_init(void)
166{
167 struct irq_desc *desc;
168 int legacy_count;
169 int node;
170 int i;
171
172 init_irq_default_affinity();
173
174 /* initialize nr_irqs based on nr_cpu_ids */
175 arch_probe_nr_irqs();
176 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
177
178 desc = irq_desc_legacy;
179 legacy_count = ARRAY_SIZE(irq_desc_legacy);
180 node = first_online_node;
181
182 /* allocate based on nr_cpu_ids */
183 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
184 sizeof(int), GFP_NOWAIT, node);
185
186 for (i = 0; i < legacy_count; i++) {
187 desc[i].irq = i;
188#ifdef CONFIG_SMP
189 desc[i].node = node;
190#endif
191 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
192 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
193 alloc_desc_masks(&desc[i], node, true);
194 init_desc_masks(&desc[i]);
195 set_irq_desc(i, &desc[i]);
196 }
197
198 return arch_early_irq_init();
199}
200
201struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
202{
203 struct irq_desc *desc;
204 unsigned long flags;
205
206 if (irq >= nr_irqs) {
207 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
208 irq, nr_irqs);
209 return NULL;
210 }
211
212 desc = irq_to_desc(irq);
213 if (desc)
214 return desc;
215
216 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
217
218 /* We have to check it to avoid races with another CPU */
219 desc = irq_to_desc(irq);
220 if (desc)
221 goto out_unlock;
222
223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
224
225 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
226 if (!desc) {
227 printk(KERN_ERR "can not alloc irq_desc\n");
228 BUG_ON(1);
229 }
230 init_one_irq_desc(irq, desc, node);
231
232 set_irq_desc(irq, desc);
233
234out_unlock:
235 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
236
237 return desc;
238}
239
240#else /* !CONFIG_SPARSE_IRQ */
241
242struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
243 [0 ... NR_IRQS-1] = {
244 .status = IRQ_DISABLED,
245 .chip = &no_irq_chip,
246 .handle_irq = handle_bad_irq,
247 .depth = 1,
248 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
249 }
250};
251
252static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
253int __init early_irq_init(void)
254{
255 struct irq_desc *desc;
256 int count;
257 int i;
258
259 init_irq_default_affinity();
260
261 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
262
263 desc = irq_desc;
264 count = ARRAY_SIZE(irq_desc);
265
266 for (i = 0; i < count; i++) {
267 desc[i].irq = i;
268 alloc_desc_masks(&desc[i], 0, true);
269 init_desc_masks(&desc[i]);
270 desc[i].kstat_irqs = kstat_irqs_all[i];
271 }
272 return arch_early_irq_init();
273}
274
275struct irq_desc *irq_to_desc(unsigned int irq)
276{
277 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
278}
279
280struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
281{
282 return irq_to_desc(irq);
283}
284#endif /* !CONFIG_SPARSE_IRQ */
285
286void clear_kstat_irqs(struct irq_desc *desc)
287{
288 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
289}
290
291/*
292 * What should we do if we get a hw irq event on an illegal vector?
293 * Each architecture has to answer this themself.
294 */
295static void ack_bad(unsigned int irq)
296{
297 struct irq_desc *desc = irq_to_desc(irq);
298
299 print_irq_desc(irq, desc);
300 ack_bad_irq(irq);
301}
302
303/*
304 * NOP functions
305 */
306static void noop(unsigned int irq)
307{
308}
309
310static unsigned int noop_ret(unsigned int irq)
311{
312 return 0;
313}
314
315/*
316 * Generic no controller implementation
317 */
318struct irq_chip no_irq_chip = {
319 .name = "none",
320 .startup = noop_ret,
321 .shutdown = noop,
322 .enable = noop,
323 .disable = noop,
324 .ack = ack_bad,
325 .end = noop,
326};
327
328/*
329 * Generic dummy implementation which can be used for
330 * real dumb interrupt sources
331 */
332struct irq_chip dummy_irq_chip = {
333 .name = "dummy",
334 .startup = noop_ret,
335 .shutdown = noop,
336 .enable = noop,
337 .disable = noop,
338 .ack = noop,
339 .mask = noop,
340 .unmask = noop,
341 .end = noop,
342};
343
344/* 37/*
345 * Special, empty irq handler: 38 * Special, empty irq handler:
346 */ 39 */
@@ -457,20 +150,20 @@ unsigned int __do_IRQ(unsigned int irq)
457 /* 150 /*
458 * No locking required for CPU-local interrupts: 151 * No locking required for CPU-local interrupts:
459 */ 152 */
460 if (desc->chip->ack) 153 if (desc->irq_data.chip->ack)
461 desc->chip->ack(irq); 154 desc->irq_data.chip->ack(irq);
462 if (likely(!(desc->status & IRQ_DISABLED))) { 155 if (likely(!(desc->status & IRQ_DISABLED))) {
463 action_ret = handle_IRQ_event(irq, desc->action); 156 action_ret = handle_IRQ_event(irq, desc->action);
464 if (!noirqdebug) 157 if (!noirqdebug)
465 note_interrupt(irq, desc, action_ret); 158 note_interrupt(irq, desc, action_ret);
466 } 159 }
467 desc->chip->end(irq); 160 desc->irq_data.chip->end(irq);
468 return 1; 161 return 1;
469 } 162 }
470 163
471 raw_spin_lock(&desc->lock); 164 raw_spin_lock(&desc->lock);
472 if (desc->chip->ack) 165 if (desc->irq_data.chip->ack)
473 desc->chip->ack(irq); 166 desc->irq_data.chip->ack(irq);
474 /* 167 /*
475 * REPLAY is when Linux resends an IRQ that was dropped earlier 168 * REPLAY is when Linux resends an IRQ that was dropped earlier
476 * WAITING is used by probe to mark irqs that are being tested 169 * WAITING is used by probe to mark irqs that are being tested
@@ -530,27 +223,9 @@ out:
530 * The ->end() handler has to deal with interrupts which got 223 * The ->end() handler has to deal with interrupts which got
531 * disabled while the handler was running. 224 * disabled while the handler was running.
532 */ 225 */
533 desc->chip->end(irq); 226 desc->irq_data.chip->end(irq);
534 raw_spin_unlock(&desc->lock); 227 raw_spin_unlock(&desc->lock);
535 228
536 return 1; 229 return 1;
537} 230}
538#endif 231#endif
539
540void early_init_irq_lock_class(void)
541{
542 struct irq_desc *desc;
543 int i;
544
545 for_each_irq_desc(i, desc) {
546 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
547 }
548}
549
550unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
551{
552 struct irq_desc *desc = irq_to_desc(irq);
553 return desc ? desc->kstat_irqs[cpu] : 0;
554}
555EXPORT_SYMBOL(kstat_irqs_cpu);
556
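
The flow code above now reaches the chip only through the irq_data embedded in the descriptor. Going the other way, a callback that is handed just the irq_data can recover its irq_desc with container_of(), which is what the irq_data_to_desc() helper added to kernel/irq/internals.h further down does. A minimal sketch, with the foo_ prefix purely hypothetical:

#include <linux/irq.h>

static inline struct irq_desc *foo_data_to_desc(struct irq_data *d)
{
	return container_of(d, struct irq_desc, irq_data);
}
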
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c63f3bc88f0b..8eb01e379ccc 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,9 +1,12 @@
1/* 1/*
2 * IRQ subsystem internal functions and variables: 2 * IRQ subsystem internal functions and variables:
3 */ 3 */
4#include <linux/irqdesc.h>
4 5
5extern int noirqdebug; 6extern int noirqdebug;
6 7
8#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
9
7/* Set default functions for irq_chip structures: */ 10/* Set default functions for irq_chip structures: */
8extern void irq_chip_set_defaults(struct irq_chip *chip); 11extern void irq_chip_set_defaults(struct irq_chip *chip);
9 12
@@ -20,16 +23,21 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
20extern void clear_kstat_irqs(struct irq_desc *desc); 23extern void clear_kstat_irqs(struct irq_desc *desc);
21extern raw_spinlock_t sparse_irq_lock; 24extern raw_spinlock_t sparse_irq_lock;
22 25
26/* Resending of interrupts :*/
27void check_irq_resend(struct irq_desc *desc, unsigned int irq);
28
23#ifdef CONFIG_SPARSE_IRQ 29#ifdef CONFIG_SPARSE_IRQ
24void replace_irq_desc(unsigned int irq, struct irq_desc *desc); 30void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
25#endif 31#endif
26 32
27#ifdef CONFIG_PROC_FS 33#ifdef CONFIG_PROC_FS
28extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 34extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
35extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
29extern void register_handler_proc(unsigned int irq, struct irqaction *action); 36extern void register_handler_proc(unsigned int irq, struct irqaction *action);
30extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 37extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
31#else 38#else
32static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } 39static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
40static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
33static inline void register_handler_proc(unsigned int irq, 41static inline void register_handler_proc(unsigned int irq,
34 struct irqaction *action) { } 42 struct irqaction *action) { }
35static inline void unregister_handler_proc(unsigned int irq, 43static inline void unregister_handler_proc(unsigned int irq,
@@ -40,17 +48,27 @@ extern int irq_select_affinity_usr(unsigned int irq);
40 48
41extern void irq_set_thread_affinity(struct irq_desc *desc); 49extern void irq_set_thread_affinity(struct irq_desc *desc);
42 50
51#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
52static inline void irq_end(unsigned int irq, struct irq_desc *desc)
53{
54 if (desc->irq_data.chip && desc->irq_data.chip->end)
55 desc->irq_data.chip->end(irq);
56}
57#else
58static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
59#endif
60
43/* Inline functions for support of irq chips on slow busses */ 61/* Inline functions for support of irq chips on slow busses */
44static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) 62static inline void chip_bus_lock(struct irq_desc *desc)
45{ 63{
46 if (unlikely(desc->chip->bus_lock)) 64 if (unlikely(desc->irq_data.chip->irq_bus_lock))
47 desc->chip->bus_lock(irq); 65 desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
48} 66}
49 67
50static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) 68static inline void chip_bus_sync_unlock(struct irq_desc *desc)
51{ 69{
52 if (unlikely(desc->chip->bus_sync_unlock)) 70 if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
53 desc->chip->bus_sync_unlock(irq); 71 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
54} 72}
55 73
56/* 74/*
@@ -67,8 +85,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
67 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 85 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
68 printk("->handle_irq(): %p, ", desc->handle_irq); 86 printk("->handle_irq(): %p, ", desc->handle_irq);
69 print_symbol("%s\n", (unsigned long)desc->handle_irq); 87 print_symbol("%s\n", (unsigned long)desc->handle_irq);
70 printk("->chip(): %p, ", desc->chip); 88 printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
71 print_symbol("%s\n", (unsigned long)desc->chip); 89 print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
72 printk("->action(): %p\n", desc->action); 90 printk("->action(): %p\n", desc->action);
73 if (desc->action) { 91 if (desc->action) {
74 printk("->action->handler(): %p, ", desc->action->handler); 92 printk("->action->handler(): %p, ", desc->action->handler);
@@ -93,3 +111,99 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
93 111
94#undef P 112#undef P
95 113
114/* Stuff below will be cleaned up after the sparse allocator is done */
115
116#ifdef CONFIG_SMP
117/**
118 * alloc_desc_masks - allocate cpumasks for irq_desc
119 * @desc: pointer to irq_desc struct
120 * @node: node which will be handling the cpumasks
121 * @boot: true if need bootmem
122 *
123 * Allocates affinity and pending_mask cpumask if required.
124 * Returns true if successful (or not required).
125 */
126static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
127 bool boot)
128{
129 gfp_t gfp = GFP_ATOMIC;
130
131 if (boot)
132 gfp = GFP_NOWAIT;
133
134#ifdef CONFIG_CPUMASK_OFFSTACK
135 if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
136 return false;
137
138#ifdef CONFIG_GENERIC_PENDING_IRQ
139 if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
140 free_cpumask_var(desc->irq_data.affinity);
141 return false;
142 }
143#endif
144#endif
145 return true;
146}
147
148static inline void init_desc_masks(struct irq_desc *desc)
149{
150 cpumask_setall(desc->irq_data.affinity);
151#ifdef CONFIG_GENERIC_PENDING_IRQ
152 cpumask_clear(desc->pending_mask);
153#endif
154}
155
156/**
157 * init_copy_desc_masks - copy cpumasks for irq_desc
158 * @old_desc: pointer to old irq_desc struct
159 * @new_desc: pointer to new irq_desc struct
160 *
161 * Ensures affinity and pending_masks are copied to new irq_desc.
162 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
163 * irq_desc struct so the copy is redundant.
164 */
165
166static inline void init_copy_desc_masks(struct irq_desc *old_desc,
167 struct irq_desc *new_desc)
168{
169#ifdef CONFIG_CPUMASK_OFFSTACK
170 cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
171
172#ifdef CONFIG_GENERIC_PENDING_IRQ
173 cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
174#endif
175#endif
176}
177
178static inline void free_desc_masks(struct irq_desc *old_desc,
179 struct irq_desc *new_desc)
180{
181 free_cpumask_var(old_desc->irq_data.affinity);
182
183#ifdef CONFIG_GENERIC_PENDING_IRQ
184 free_cpumask_var(old_desc->pending_mask);
185#endif
186}
187
188#else /* !CONFIG_SMP */
189
190static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
191 bool boot)
192{
193 return true;
194}
195
196static inline void init_desc_masks(struct irq_desc *desc)
197{
198}
199
200static inline void init_copy_desc_masks(struct irq_desc *old_desc,
201 struct irq_desc *new_desc)
202{
203}
204
205static inline void free_desc_masks(struct irq_desc *old_desc,
206 struct irq_desc *new_desc)
207{
208}
209#endif /* CONFIG_SMP */
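
The bus-lock helpers now take only the descriptor; the irq number argument is gone because the chip callbacks receive the irq_data. The calling convention stays the one used in kernel/irq/manage.c below: take the optional slow-bus lock around the desc->lock protected section. A sketch, with foo_poke_irq() purely hypothetical:

static void foo_poke_irq(struct irq_desc *desc)
{
	unsigned long flags;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	/* ... manipulate descriptor state here ... */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
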
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
new file mode 100644
index 000000000000..a1fbd1d347af
--- /dev/null
+++ b/kernel/irq/irqdesc.c
@@ -0,0 +1,424 @@
1/*
2 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
3 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
4 *
5 * This file contains the interrupt descriptor management code
6 *
7 * Detailed information is available in Documentation/DocBook/genericirq
8 *
9 */
10#include <linux/irq.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
15#include <linux/radix-tree.h>
16#include <linux/bitmap.h>
17
18#include "internals.h"
19
20/*
21 * lockdep: we want to handle all irq_desc locks as a single lock-class:
22 */
23struct lock_class_key irq_desc_lock_class;
24
25#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
26static void __init init_irq_default_affinity(void)
27{
28 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
29 cpumask_setall(irq_default_affinity);
30}
31#else
32static void __init init_irq_default_affinity(void)
33{
34}
35#endif
36
37#ifdef CONFIG_SMP
38static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
39{
40 if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
41 return -ENOMEM;
42
43#ifdef CONFIG_GENERIC_PENDING_IRQ
44 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
45 free_cpumask_var(desc->irq_data.affinity);
46 return -ENOMEM;
47 }
48#endif
49 return 0;
50}
51
52static void desc_smp_init(struct irq_desc *desc, int node)
53{
54 desc->irq_data.node = node;
55 cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
56}
57
58#else
59static inline int
60alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
61static inline void desc_smp_init(struct irq_desc *desc, int node) { }
62#endif
63
64static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
65{
66 desc->irq_data.irq = irq;
67 desc->irq_data.chip = &no_irq_chip;
68 desc->irq_data.chip_data = NULL;
69 desc->irq_data.handler_data = NULL;
70 desc->irq_data.msi_desc = NULL;
71 desc->status = IRQ_DEFAULT_INIT_FLAGS;
72 desc->handle_irq = handle_bad_irq;
73 desc->depth = 1;
74 desc->name = NULL;
75 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
76 desc_smp_init(desc, node);
77}
78
79int nr_irqs = NR_IRQS;
80EXPORT_SYMBOL_GPL(nr_irqs);
81
82DEFINE_RAW_SPINLOCK(sparse_irq_lock);
83static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
84
85#ifdef CONFIG_SPARSE_IRQ
86
87void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
88{
89 void *ptr;
90
91 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
92 GFP_ATOMIC, node);
93
94 /*
95 * don't overwrite if we cannot get a new one
96 * init_copy_kstat_irqs() could still use old one
97 */
98 if (ptr) {
99 printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
100 desc->kstat_irqs = ptr;
101 }
102}
103
104static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
105
106static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
107{
108 radix_tree_insert(&irq_desc_tree, irq, desc);
109}
110
111struct irq_desc *irq_to_desc(unsigned int irq)
112{
113 return radix_tree_lookup(&irq_desc_tree, irq);
114}
115
116void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
117{
118 void **ptr;
119
120 ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
121 if (ptr)
122 radix_tree_replace_slot(ptr, desc);
123}
124
125static void delete_irq_desc(unsigned int irq)
126{
127 radix_tree_delete(&irq_desc_tree, irq);
128}
129
130#ifdef CONFIG_SMP
131static void free_masks(struct irq_desc *desc)
132{
133#ifdef CONFIG_GENERIC_PENDING_IRQ
134 free_cpumask_var(desc->pending_mask);
135#endif
136 free_cpumask_var(desc->affinity);
137}
138#else
139static inline void free_masks(struct irq_desc *desc) { }
140#endif
141
142static struct irq_desc *alloc_desc(int irq, int node)
143{
144 /* Temporary hack until we can switch to GFP_KERNEL */
145 gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
146 struct irq_desc *desc;
147
148 desc = kzalloc_node(sizeof(*desc), gfp, node);
149 if (!desc)
150 return NULL;
151 /* allocate based on nr_cpu_ids */
152 desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
153 gfp, node);
154 if (!desc->kstat_irqs)
155 goto err_desc;
156
157 if (alloc_masks(desc, gfp, node))
158 goto err_kstat;
159
160 raw_spin_lock_init(&desc->lock);
161 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
162
163 desc_set_defaults(irq, desc, node);
164
165 return desc;
166
167err_kstat:
168 kfree(desc->kstat_irqs);
169err_desc:
170 kfree(desc);
171 return NULL;
172}
173
174static void free_desc(unsigned int irq)
175{
176 struct irq_desc *desc = irq_to_desc(irq);
177 unsigned long flags;
178
179 unregister_irq_proc(irq, desc);
180
181 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
182 delete_irq_desc(irq);
183 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
184
185 free_masks(desc);
186 kfree(desc->kstat_irqs);
187 kfree(desc);
188}
189
190static int alloc_descs(unsigned int start, unsigned int cnt, int node)
191{
192 struct irq_desc *desc;
193 unsigned long flags;
194 int i;
195
196 for (i = 0; i < cnt; i++) {
197 desc = alloc_desc(start + i, node);
198 if (!desc)
199 goto err;
200 /* temporary until I fixed x86 madness */
201 arch_init_chip_data(desc, node);
202 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
203 irq_insert_desc(start + i, desc);
204 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
205 }
206 return start;
207
208err:
209 for (i--; i >= 0; i--)
210 free_desc(start + i);
211
212 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
213 bitmap_clear(allocated_irqs, start, cnt);
214 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
215 return -ENOMEM;
216}
217
218struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
219{
220 int res = irq_alloc_descs(irq, irq, 1, node);
221
222 if (res == -EEXIST || res == irq)
223 return irq_to_desc(irq);
224 return NULL;
225}
226
227int __init early_irq_init(void)
228{
229 int i, initcnt, node = first_online_node;
230 struct irq_desc *desc;
231
232 init_irq_default_affinity();
233
234 /* Let arch update nr_irqs and return the nr of preallocated irqs */
235 initcnt = arch_probe_nr_irqs();
236 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
237
238 for (i = 0; i < initcnt; i++) {
239 desc = alloc_desc(i, node);
240 set_bit(i, allocated_irqs);
241 irq_insert_desc(i, desc);
242 }
243 return arch_early_irq_init();
244}
245
246#else /* !CONFIG_SPARSE_IRQ */
247
248struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
249 [0 ... NR_IRQS-1] = {
250 .status = IRQ_DEFAULT_INIT_FLAGS,
251 .handle_irq = handle_bad_irq,
252 .depth = 1,
253 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
254 }
255};
256
257static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
258int __init early_irq_init(void)
259{
260 int count, i, node = first_online_node;
261 struct irq_desc *desc;
262
263 init_irq_default_affinity();
264
265 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
266
267 desc = irq_desc;
268 count = ARRAY_SIZE(irq_desc);
269
270 for (i = 0; i < count; i++) {
271 desc[i].irq_data.irq = i;
272 desc[i].irq_data.chip = &no_irq_chip;
273 desc[i].kstat_irqs = kstat_irqs_all[i];
274 alloc_masks(desc + i, GFP_KERNEL, node);
275 desc_smp_init(desc + i, node);
276 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
277 }
278 return arch_early_irq_init();
279}
280
281struct irq_desc *irq_to_desc(unsigned int irq)
282{
283 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
284}
285
286struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
287{
288 return irq_to_desc(irq);
289}
290
291#ifdef CONFIG_SMP
292static inline int desc_node(struct irq_desc *desc)
293{
294 return desc->irq_data.node;
295}
296#else
297static inline int desc_node(struct irq_desc *desc) { return 0; }
298#endif
299
300static void free_desc(unsigned int irq)
301{
302 struct irq_desc *desc = irq_to_desc(irq);
303 unsigned long flags;
304
305 raw_spin_lock_irqsave(&desc->lock, flags);
306 desc_set_defaults(irq, desc, desc_node(desc));
307 raw_spin_unlock_irqrestore(&desc->lock, flags);
308}
309
310static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
311{
312 return start;
313}
314#endif /* !CONFIG_SPARSE_IRQ */
315
316/* Dynamic interrupt handling */
317
318/**
319 * irq_free_descs - free irq descriptors
320 * @from: Start of descriptor range
321 * @cnt: Number of consecutive irqs to free
322 */
323void irq_free_descs(unsigned int from, unsigned int cnt)
324{
325 unsigned long flags;
326 int i;
327
328 if (from >= nr_irqs || (from + cnt) > nr_irqs)
329 return;
330
331 for (i = 0; i < cnt; i++)
332 free_desc(from + i);
333
334 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
335 bitmap_clear(allocated_irqs, from, cnt);
336 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
337}
338
339/**
340 * irq_alloc_descs - allocate and initialize a range of irq descriptors
341 * @irq: Allocate for specific irq number if irq >= 0
342 * @from: Start the search from this irq number
343 * @cnt: Number of consecutive irqs to allocate.
344 * @node: Preferred node on which the irq descriptor should be allocated
345 *
346 * Returns the first irq number or error code
347 */
348int __ref
349irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
350{
351 unsigned long flags;
352 int start, ret;
353
354 if (!cnt)
355 return -EINVAL;
356
357 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
358
359 start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
360 ret = -EEXIST;
361	if (irq >= 0 && start != irq)
362 goto err;
363
364 ret = -ENOMEM;
365 if (start >= nr_irqs)
366 goto err;
367
368 bitmap_set(allocated_irqs, start, cnt);
369 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
370 return alloc_descs(start, cnt, node);
371
372err:
373 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
374 return ret;
375}
376
377/**
378 * irq_reserve_irqs - mark irqs allocated
379 * @from: mark from irq number
380 * @cnt: number of irqs to mark
381 *
382 * Returns 0 on success or an appropriate error code
383 */
384int irq_reserve_irqs(unsigned int from, unsigned int cnt)
385{
386 unsigned long flags;
387 unsigned int start;
388 int ret = 0;
389
390 if (!cnt || (from + cnt) > nr_irqs)
391 return -EINVAL;
392
393 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
394 start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
395 if (start == from)
396 bitmap_set(allocated_irqs, start, cnt);
397 else
398 ret = -EEXIST;
399 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
400 return ret;
401}
402
403/**
404 * irq_get_next_irq - get next allocated irq number
405 * @offset: where to start the search
406 *
407 * Returns next irq number after offset or nr_irqs if none is found.
408 */
409unsigned int irq_get_next_irq(unsigned int offset)
410{
411 return find_next_bit(allocated_irqs, nr_irqs, offset);
412}
413
414/* Statistics access */
415void clear_kstat_irqs(struct irq_desc *desc)
416{
417 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
418}
419
420unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
421{
422 struct irq_desc *desc = irq_to_desc(irq);
423 return desc ? desc->kstat_irqs[cpu] : 0;
424}
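
irq_alloc_descs(), irq_free_descs() and irq_reserve_irqs() form the new dynamic descriptor interface. A usage sketch follows; the foo_* wrappers and the count of four are hypothetical, only the allocator calls themselves come from this patch:

#include <linux/irq.h>

static int foo_setup_irqs(int node)
{
	/* any free range of 4 descriptors, searched from 0 */
	int base = irq_alloc_descs(-1, 0, 4, node);

	if (base < 0)
		return base;	/* -ENOMEM when the irq space is exhausted */

	/* ... install chips and handlers for base .. base + 3 here ... */
	return base;
}

static void foo_teardown_irqs(int base)
{
	irq_free_descs(base, 4);
}
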
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c3003e9d91a3..644e8d5fa367 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
73{ 73{
74 struct irq_desc *desc = irq_to_desc(irq); 74 struct irq_desc *desc = irq_to_desc(irq);
75 75
76 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 76 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
77 !desc->chip->set_affinity) 77 !desc->irq_data.chip->irq_set_affinity)
78 return 0; 78 return 0;
79 79
80 return 1; 80 return 1;
@@ -109,17 +109,18 @@ void irq_set_thread_affinity(struct irq_desc *desc)
109int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 109int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
110{ 110{
111 struct irq_desc *desc = irq_to_desc(irq); 111 struct irq_desc *desc = irq_to_desc(irq);
112 struct irq_chip *chip = desc->irq_data.chip;
112 unsigned long flags; 113 unsigned long flags;
113 114
114 if (!desc->chip->set_affinity) 115 if (!chip->irq_set_affinity)
115 return -EINVAL; 116 return -EINVAL;
116 117
117 raw_spin_lock_irqsave(&desc->lock, flags); 118 raw_spin_lock_irqsave(&desc->lock, flags);
118 119
119#ifdef CONFIG_GENERIC_PENDING_IRQ 120#ifdef CONFIG_GENERIC_PENDING_IRQ
120 if (desc->status & IRQ_MOVE_PCNTXT) { 121 if (desc->status & IRQ_MOVE_PCNTXT) {
121 if (!desc->chip->set_affinity(irq, cpumask)) { 122 if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
122 cpumask_copy(desc->affinity, cpumask); 123 cpumask_copy(desc->irq_data.affinity, cpumask);
123 irq_set_thread_affinity(desc); 124 irq_set_thread_affinity(desc);
124 } 125 }
125 } 126 }
@@ -128,8 +129,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
128 cpumask_copy(desc->pending_mask, cpumask); 129 cpumask_copy(desc->pending_mask, cpumask);
129 } 130 }
130#else 131#else
131 if (!desc->chip->set_affinity(irq, cpumask)) { 132 if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
132 cpumask_copy(desc->affinity, cpumask); 133 cpumask_copy(desc->irq_data.affinity, cpumask);
133 irq_set_thread_affinity(desc); 134 irq_set_thread_affinity(desc);
134 } 135 }
135#endif 136#endif
@@ -168,16 +169,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
168 * one of the targets is online. 169 * one of the targets is online.
169 */ 170 */
170 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 171 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
171 if (cpumask_any_and(desc->affinity, cpu_online_mask) 172 if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
172 < nr_cpu_ids) 173 < nr_cpu_ids)
173 goto set_affinity; 174 goto set_affinity;
174 else 175 else
175 desc->status &= ~IRQ_AFFINITY_SET; 176 desc->status &= ~IRQ_AFFINITY_SET;
176 } 177 }
177 178
178 cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); 179 cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
179set_affinity: 180set_affinity:
180 desc->chip->set_affinity(irq, desc->affinity); 181 desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
181 182
182 return 0; 183 return 0;
183} 184}
@@ -223,7 +224,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
223 224
224 if (!desc->depth++) { 225 if (!desc->depth++) {
225 desc->status |= IRQ_DISABLED; 226 desc->status |= IRQ_DISABLED;
226 desc->chip->disable(irq); 227 desc->irq_data.chip->irq_disable(&desc->irq_data);
227 } 228 }
228} 229}
229 230
@@ -246,11 +247,11 @@ void disable_irq_nosync(unsigned int irq)
246 if (!desc) 247 if (!desc)
247 return; 248 return;
248 249
249 chip_bus_lock(irq, desc); 250 chip_bus_lock(desc);
250 raw_spin_lock_irqsave(&desc->lock, flags); 251 raw_spin_lock_irqsave(&desc->lock, flags);
251 __disable_irq(desc, irq, false); 252 __disable_irq(desc, irq, false);
252 raw_spin_unlock_irqrestore(&desc->lock, flags); 253 raw_spin_unlock_irqrestore(&desc->lock, flags);
253 chip_bus_sync_unlock(irq, desc); 254 chip_bus_sync_unlock(desc);
254} 255}
255EXPORT_SYMBOL(disable_irq_nosync); 256EXPORT_SYMBOL(disable_irq_nosync);
256 257
@@ -313,7 +314,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
313 * IRQ line is re-enabled. 314 * IRQ line is re-enabled.
314 * 315 *
315 * This function may be called from IRQ context only when 316 * This function may be called from IRQ context only when
316 * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 317 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
317 */ 318 */
318void enable_irq(unsigned int irq) 319void enable_irq(unsigned int irq)
319{ 320{
@@ -323,11 +324,11 @@ void enable_irq(unsigned int irq)
323 if (!desc) 324 if (!desc)
324 return; 325 return;
325 326
326 chip_bus_lock(irq, desc); 327 chip_bus_lock(desc);
327 raw_spin_lock_irqsave(&desc->lock, flags); 328 raw_spin_lock_irqsave(&desc->lock, flags);
328 __enable_irq(desc, irq, false); 329 __enable_irq(desc, irq, false);
329 raw_spin_unlock_irqrestore(&desc->lock, flags); 330 raw_spin_unlock_irqrestore(&desc->lock, flags);
330 chip_bus_sync_unlock(irq, desc); 331 chip_bus_sync_unlock(desc);
331} 332}
332EXPORT_SYMBOL(enable_irq); 333EXPORT_SYMBOL(enable_irq);
333 334
@@ -336,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
336 struct irq_desc *desc = irq_to_desc(irq); 337 struct irq_desc *desc = irq_to_desc(irq);
337 int ret = -ENXIO; 338 int ret = -ENXIO;
338 339
339 if (desc->chip->set_wake) 340 if (desc->irq_data.chip->irq_set_wake)
340 ret = desc->chip->set_wake(irq, on); 341 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
341 342
342 return ret; 343 return ret;
343} 344}
@@ -429,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
429} 430}
430 431
431int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 432int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
432 unsigned long flags) 433 unsigned long flags)
433{ 434{
434 int ret; 435 int ret;
435 struct irq_chip *chip = desc->chip; 436 struct irq_chip *chip = desc->irq_data.chip;
436 437
437 if (!chip || !chip->set_type) { 438 if (!chip || !chip->irq_set_type) {
438 /* 439 /*
439 * IRQF_TRIGGER_* but the PIC does not support multiple 440 * IRQF_TRIGGER_* but the PIC does not support multiple
440 * flow-types? 441 * flow-types?
@@ -445,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
445 } 446 }
446 447
447 /* caller masked out all except trigger mode flags */ 448 /* caller masked out all except trigger mode flags */
448 ret = chip->set_type(irq, flags); 449 ret = chip->irq_set_type(&desc->irq_data, flags);
449 450
450 if (ret) 451 if (ret)
451 pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 452 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
452 (int)flags, irq, chip->set_type); 453 flags, irq, chip->irq_set_type);
453 else { 454 else {
454 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 455 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
455 flags |= IRQ_LEVEL; 456 flags |= IRQ_LEVEL;
@@ -457,8 +458,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
457 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 458 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
458 desc->status |= flags; 459 desc->status |= flags;
459 460
460 if (chip != desc->chip) 461 if (chip != desc->irq_data.chip)
461 irq_chip_set_defaults(desc->chip); 462 irq_chip_set_defaults(desc->irq_data.chip);
462 } 463 }
463 464
464 return ret; 465 return ret;
@@ -507,7 +508,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
507static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 508static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
508{ 509{
509again: 510again:
510 chip_bus_lock(irq, desc); 511 chip_bus_lock(desc);
511 raw_spin_lock_irq(&desc->lock); 512 raw_spin_lock_irq(&desc->lock);
512 513
513 /* 514 /*
@@ -521,17 +522,17 @@ again:
521 */ 522 */
522 if (unlikely(desc->status & IRQ_INPROGRESS)) { 523 if (unlikely(desc->status & IRQ_INPROGRESS)) {
523 raw_spin_unlock_irq(&desc->lock); 524 raw_spin_unlock_irq(&desc->lock);
524 chip_bus_sync_unlock(irq, desc); 525 chip_bus_sync_unlock(desc);
525 cpu_relax(); 526 cpu_relax();
526 goto again; 527 goto again;
527 } 528 }
528 529
529 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 530 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
530 desc->status &= ~IRQ_MASKED; 531 desc->status &= ~IRQ_MASKED;
531 desc->chip->unmask(irq); 532 desc->irq_data.chip->irq_unmask(&desc->irq_data);
532 } 533 }
533 raw_spin_unlock_irq(&desc->lock); 534 raw_spin_unlock_irq(&desc->lock);
534 chip_bus_sync_unlock(irq, desc); 535 chip_bus_sync_unlock(desc);
535} 536}
536 537
537#ifdef CONFIG_SMP 538#ifdef CONFIG_SMP
@@ -556,7 +557,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
556 } 557 }
557 558
558 raw_spin_lock_irq(&desc->lock); 559 raw_spin_lock_irq(&desc->lock);
559 cpumask_copy(mask, desc->affinity); 560 cpumask_copy(mask, desc->irq_data.affinity);
560 raw_spin_unlock_irq(&desc->lock); 561 raw_spin_unlock_irq(&desc->lock);
561 562
562 set_cpus_allowed_ptr(current, mask); 563 set_cpus_allowed_ptr(current, mask);
@@ -657,7 +658,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
657 if (!desc) 658 if (!desc)
658 return -EINVAL; 659 return -EINVAL;
659 660
660 if (desc->chip == &no_irq_chip) 661 if (desc->irq_data.chip == &no_irq_chip)
661 return -ENOSYS; 662 return -ENOSYS;
662 /* 663 /*
663 * Some drivers like serial.c use request_irq() heavily, 664 * Some drivers like serial.c use request_irq() heavily,
@@ -752,7 +753,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
752 } 753 }
753 754
754 if (!shared) { 755 if (!shared) {
755 irq_chip_set_defaults(desc->chip); 756 irq_chip_set_defaults(desc->irq_data.chip);
756 757
757 init_waitqueue_head(&desc->wait_for_threads); 758 init_waitqueue_head(&desc->wait_for_threads);
758 759
@@ -779,7 +780,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
779 if (!(desc->status & IRQ_NOAUTOEN)) { 780 if (!(desc->status & IRQ_NOAUTOEN)) {
780 desc->depth = 0; 781 desc->depth = 0;
781 desc->status &= ~IRQ_DISABLED; 782 desc->status &= ~IRQ_DISABLED;
782 desc->chip->startup(irq); 783 desc->irq_data.chip->irq_startup(&desc->irq_data);
783 } else 784 } else
784 /* Undo nested disables: */ 785 /* Undo nested disables: */
785 desc->depth = 1; 786 desc->depth = 1;
@@ -912,17 +913,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
912 913
913 /* Currently used only by UML, might disappear one day: */ 914 /* Currently used only by UML, might disappear one day: */
914#ifdef CONFIG_IRQ_RELEASE_METHOD 915#ifdef CONFIG_IRQ_RELEASE_METHOD
915 if (desc->chip->release) 916 if (desc->irq_data.chip->release)
916 desc->chip->release(irq, dev_id); 917 desc->irq_data.chip->release(irq, dev_id);
917#endif 918#endif
918 919
919 /* If this was the last handler, shut down the IRQ line: */ 920 /* If this was the last handler, shut down the IRQ line: */
920 if (!desc->action) { 921 if (!desc->action) {
921 desc->status |= IRQ_DISABLED; 922 desc->status |= IRQ_DISABLED;
922 if (desc->chip->shutdown) 923 if (desc->irq_data.chip->irq_shutdown)
923 desc->chip->shutdown(irq); 924 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
924 else 925 else
925 desc->chip->disable(irq); 926 desc->irq_data.chip->irq_disable(&desc->irq_data);
926 } 927 }
927 928
928#ifdef CONFIG_SMP 929#ifdef CONFIG_SMP
@@ -997,9 +998,9 @@ void free_irq(unsigned int irq, void *dev_id)
997 if (!desc) 998 if (!desc)
998 return; 999 return;
999 1000
1000 chip_bus_lock(irq, desc); 1001 chip_bus_lock(desc);
1001 kfree(__free_irq(irq, dev_id)); 1002 kfree(__free_irq(irq, dev_id));
1002 chip_bus_sync_unlock(irq, desc); 1003 chip_bus_sync_unlock(desc);
1003} 1004}
1004EXPORT_SYMBOL(free_irq); 1005EXPORT_SYMBOL(free_irq);
1005 1006
@@ -1086,9 +1087,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1086 action->name = devname; 1087 action->name = devname;
1087 action->dev_id = dev_id; 1088 action->dev_id = dev_id;
1088 1089
1089 chip_bus_lock(irq, desc); 1090 chip_bus_lock(desc);
1090 retval = __setup_irq(irq, desc, action); 1091 retval = __setup_irq(irq, desc, action);
1091 chip_bus_sync_unlock(irq, desc); 1092 chip_bus_sync_unlock(desc);
1092 1093
1093 if (retval) 1094 if (retval)
1094 kfree(action); 1095 kfree(action);
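
The management code above now invokes the irq_* callbacks with the embedded irq_data and, for affinity, an extra "force" argument. For a chip author the conversion is essentially a signature change; a hedged sketch with hypothetical foo_* names and the hardware access elided:

/* old style, being phased out:
 *	int (*set_affinity)(unsigned int irq, const struct cpumask *mask);
 * new style, as invoked by irq_set_affinity()/setup_affinity() above: */
static int foo_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask, bool force)
{
	/* reprogram the (hypothetical) controller for d->irq here */
	return 0;	/* on 0, the core copies mask into desc->irq_data.affinity */
}
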
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 241962280836..1d2541940480 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -7,6 +7,7 @@
7void move_masked_irq(int irq) 7void move_masked_irq(int irq)
8{ 8{
9 struct irq_desc *desc = irq_to_desc(irq); 9 struct irq_desc *desc = irq_to_desc(irq);
10 struct irq_chip *chip = desc->irq_data.chip;
10 11
11 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 12 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
12 return; 13 return;
@@ -24,7 +25,7 @@ void move_masked_irq(int irq)
24 if (unlikely(cpumask_empty(desc->pending_mask))) 25 if (unlikely(cpumask_empty(desc->pending_mask)))
25 return; 26 return;
26 27
27 if (!desc->chip->set_affinity) 28 if (!chip->irq_set_affinity)
28 return; 29 return;
29 30
30 assert_raw_spin_locked(&desc->lock); 31 assert_raw_spin_locked(&desc->lock);
@@ -43,8 +44,9 @@ void move_masked_irq(int irq)
43 */ 44 */
44 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
45 < nr_cpu_ids)) 46 < nr_cpu_ids))
46 if (!desc->chip->set_affinity(irq, desc->pending_mask)) { 47 if (!chip->irq_set_affinity(&desc->irq_data,
47 cpumask_copy(desc->affinity, desc->pending_mask); 48 desc->pending_mask, false)) {
49 cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
48 irq_set_thread_affinity(desc); 50 irq_set_thread_affinity(desc);
49 } 51 }
50 52
@@ -61,8 +63,8 @@ void move_native_irq(int irq)
61 if (unlikely(desc->status & IRQ_DISABLED)) 63 if (unlikely(desc->status & IRQ_DISABLED))
62 return; 64 return;
63 65
64 desc->chip->mask(irq); 66 desc->irq_data.chip->irq_mask(&desc->irq_data);
65 move_masked_irq(irq); 67 move_masked_irq(irq);
66 desc->chip->unmask(irq); 68 desc->irq_data.chip->irq_unmask(&desc->irq_data);
67} 69}
68 70
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 65d3845665ac..e7f1f16402c1 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -44,7 +44,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
44 return false; 44 return false;
45 } 45 }
46 raw_spin_lock_init(&desc->lock); 46 raw_spin_lock_init(&desc->lock);
47 desc->node = node; 47 desc->irq_data.node = node;
48 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 48 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
49 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); 49 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
50 init_copy_desc_masks(old_desc, desc); 50 init_copy_desc_masks(old_desc, desc);
@@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
66 unsigned int irq; 66 unsigned int irq;
67 unsigned long flags; 67 unsigned long flags;
68 68
69 irq = old_desc->irq; 69 irq = old_desc->irq_data.irq;
70 70
71 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 71 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
72 72
@@ -109,10 +109,10 @@ out_unlock:
109struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 109struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
110{ 110{
111 /* those static or target node is -1, do not move them */ 111 /* those static or target node is -1, do not move them */
112 if (desc->irq < NR_IRQS_LEGACY || node == -1) 112 if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
113 return desc; 113 return desc;
114 114
115 if (desc->node != node) 115 if (desc->irq_data.node != node)
116 desc = __real_move_irq_desc(desc, node); 116 desc = __real_move_irq_desc(desc, node);
117 117
118 return desc; 118 return desc;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 09a2ee540bd2..01b1d3a88983 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir;
21static int irq_affinity_proc_show(struct seq_file *m, void *v) 21static int irq_affinity_proc_show(struct seq_file *m, void *v)
22{ 22{
23 struct irq_desc *desc = irq_to_desc((long)m->private); 23 struct irq_desc *desc = irq_to_desc((long)m->private);
24 const struct cpumask *mask = desc->affinity; 24 const struct cpumask *mask = desc->irq_data.affinity;
25 25
26#ifdef CONFIG_GENERIC_PENDING_IRQ 26#ifdef CONFIG_GENERIC_PENDING_IRQ
27 if (desc->status & IRQ_MOVE_PENDING) 27 if (desc->status & IRQ_MOVE_PENDING)
@@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
65 cpumask_var_t new_value; 65 cpumask_var_t new_value;
66 int err; 66 int err;
67 67
68 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 68 if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
69 irq_balancing_disabled(irq)) 69 irq_balancing_disabled(irq))
70 return -EIO; 70 return -EIO;
71 71
@@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
185{ 185{
186 struct irq_desc *desc = irq_to_desc((long) m->private); 186 struct irq_desc *desc = irq_to_desc((long) m->private);
187 187
188 seq_printf(m, "%d\n", desc->node); 188 seq_printf(m, "%d\n", desc->irq_data.node);
189 return 0; 189 return 0;
190} 190}
191 191
@@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
269{ 269{
270 char name [MAX_NAMELEN]; 270 char name [MAX_NAMELEN];
271 271
272 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 272 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
273 return; 273 return;
274 274
275 memset(name, 0, MAX_NAMELEN); 275 memset(name, 0, MAX_NAMELEN);
@@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
297 &irq_spurious_proc_fops, (void *)(long)irq); 297 &irq_spurious_proc_fops, (void *)(long)irq);
298} 298}
299 299
300void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
301{
302 char name [MAX_NAMELEN];
303
304 if (!root_irq_dir || !desc->dir)
305 return;
306#ifdef CONFIG_SMP
307 remove_proc_entry("smp_affinity", desc->dir);
308 remove_proc_entry("affinity_hint", desc->dir);
309 remove_proc_entry("node", desc->dir);
310#endif
311 remove_proc_entry("spurious", desc->dir);
312
313 memset(name, 0, MAX_NAMELEN);
314 sprintf(name, "%u", irq);
315 remove_proc_entry(name, root_irq_dir);
316}
317
300#undef MAX_NAMELEN 318#undef MAX_NAMELEN
301 319
302void unregister_handler_proc(unsigned int irq, struct irqaction *action) 320void unregister_handler_proc(unsigned int irq, struct irqaction *action)
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 090c3763f3a2..891115a929aa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
60 /* 60 /*
61 * Make sure the interrupt is enabled, before resending it: 61 * Make sure the interrupt is enabled, before resending it:
62 */ 62 */
63 desc->chip->enable(irq); 63 desc->irq_data.chip->irq_enable(&desc->irq_data);
64 64
65 /* 65 /*
66 * We do not resend level type interrupts. Level type 66 * We do not resend level type interrupts. Level type
@@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
72 72
73 if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { 73 if (!desc->irq_data.chip->irq_retrigger ||
74 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
74#ifdef CONFIG_HARDIRQS_SW_RESEND 75#ifdef CONFIG_HARDIRQS_SW_RESEND
75 /* Set it pending and activate the softirq: */ 76 /* Set it pending and activate the softirq: */
76 set_bit(irq, irqs_resend); 77 set_bit(irq, irqs_resend);
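
check_irq_resend() now hands the irq_data to the chip's irq_retrigger() callback; a nonzero return means the hardware will replay the edge, so the software resend path above is skipped. A chip-side sketch, where foo_base and the register offset are invented for illustration:

#include <linux/io.h>
#include <linux/irq.h>

static void __iomem *foo_base;	/* hypothetical controller mapping */

static int foo_irq_retrigger(struct irq_data *d)
{
	/* hypothetical software-trigger register, one bit per line */
	writel(1U << (d->irq & 31), foo_base + 0x08);
	return 1;	/* replayed in hardware, no software resend needed */
}
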
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90ae534f..3089d3b9d5f3 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -14,6 +14,8 @@
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/timer.h> 15#include <linux/timer.h>
16 16
17#include "internals.h"
18
17static int irqfixup __read_mostly; 19static int irqfixup __read_mostly;
18 20
19#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) 21#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
@@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc)
78 * If we did actual work for the real IRQ line we must let the 80 * If we did actual work for the real IRQ line we must let the
79 * IRQ controller clean up too 81 * IRQ controller clean up too
80 */ 82 */
81 if (work && desc->chip && desc->chip->end) 83 if (work)
82 desc->chip->end(irq); 84 irq_end(irq, desc);
83 raw_spin_unlock(&desc->lock); 85 raw_spin_unlock(&desc->lock);
84 86
85 return ok; 87 return ok;
@@ -254,7 +256,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
254 printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 256 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
255 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; 257 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
256 desc->depth++; 258 desc->depth++;
257 desc->chip->disable(irq); 259 desc->irq_data.chip->irq_disable(&desc->irq_data);
258 260
259 mod_timer(&poll_spurious_irq_timer, 261 mod_timer(&poll_spurious_irq_timer,
260 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 262 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 07b4f1b1a73a..14a7b80b2cce 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -886,9 +886,10 @@ int __init __weak early_irq_init(void)
886 return 0; 886 return 0;
887} 887}
888 888
889#ifdef CONFIG_GENERIC_HARDIRQS
889int __init __weak arch_probe_nr_irqs(void) 890int __init __weak arch_probe_nr_irqs(void)
890{ 891{
891 return 0; 892 return NR_IRQS_LEGACY;
892} 893}
893 894
894int __init __weak arch_early_irq_init(void) 895int __init __weak arch_early_irq_init(void)
@@ -900,3 +901,4 @@ int __weak arch_init_chip_data(struct irq_desc *desc, int node)
900{ 901{
901 return 0; 902 return 0;
902} 903}
904#endif
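
The weak arch_probe_nr_irqs() now reports NR_IRQS_LEGACY, matching the contract used by early_irq_init() in kernel/irq/irqdesc.c: the architecture may bump nr_irqs and returns how many descriptors should be preallocated. A hypothetical override, as a sketch only:

#include <linux/init.h>
#include <linux/irq.h>

int __init arch_probe_nr_irqs(void)
{
	nr_irqs = NR_IRQS;	/* or a machine-specific upper bound */
	return NR_IRQS_LEGACY;	/* descriptors early_irq_init() preallocates */
}
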