diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 17:11:46 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 17:11:46 -0400 |
| commit | 4a60cfa9457749f7987fd4f3c956dbba5a281129 (patch) | |
| tree | 85f3633276282cde0a3ac558d988704eaa3e68af | |
| parent | 62bea97f54d806218a992b18d1f425cfb5060175 (diff) | |
| parent | 27afdf2008da0b8878a73e32e4eb12381b84e224 (diff) | |
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
apic, x86: Check if EILVT APIC registers are available (AMD only)
x86: ioapic: Call free_irte only if interrupt remapping enabled
arm: Use ARCH_IRQ_INIT_FLAGS
genirq, ARM: Fix boot on ARM platforms
genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
x86: Switch sparse_irq allocations to GFP_KERNEL
genirq: Switch sparse_irq allocator to GFP_KERNEL
genirq: Make sparse_lock a mutex
x86: lguest: Use new irq allocator
genirq: Remove the now unused sparse irq leftovers
genirq: Sanitize dynamic irq handling
genirq: Remove arch_init_chip_data()
x86: xen: Sanitise sparse_irq handling
x86: Use sane enumeration
x86: uv: Clean up the direct access to irq_desc
x86: Make io_apic.c local functions static
genirq: Remove irq_2_iommu
x86: Speed up the irq_remapped check in hot pathes
intr_remap: Simplify the code further
...
Fix up trivial conflicts in arch/x86/Kconfig
83 files changed, 2135 insertions, 2226 deletions
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl index 1448b33fd22..fb10fd08c05 100644 --- a/Documentation/DocBook/genericirq.tmpl +++ b/Documentation/DocBook/genericirq.tmpl | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | </authorgroup> | 28 | </authorgroup> |
| 29 | 29 | ||
| 30 | <copyright> | 30 | <copyright> |
| 31 | <year>2005-2006</year> | 31 | <year>2005-2010</year> |
| 32 | <holder>Thomas Gleixner</holder> | 32 | <holder>Thomas Gleixner</holder> |
| 33 | </copyright> | 33 | </copyright> |
| 34 | <copyright> | 34 | <copyright> |
| @@ -100,6 +100,10 @@ | |||
| 100 | <listitem><para>Edge type</para></listitem> | 100 | <listitem><para>Edge type</para></listitem> |
| 101 | <listitem><para>Simple type</para></listitem> | 101 | <listitem><para>Simple type</para></listitem> |
| 102 | </itemizedlist> | 102 | </itemizedlist> |
| 103 | During the implementation we identified another type: | ||
| 104 | <itemizedlist> | ||
| 105 | <listitem><para>Fast EOI type</para></listitem> | ||
| 106 | </itemizedlist> | ||
| 103 | In the SMP world of the __do_IRQ() super-handler another type | 107 | In the SMP world of the __do_IRQ() super-handler another type |
| 104 | was identified: | 108 | was identified: |
| 105 | <itemizedlist> | 109 | <itemizedlist> |
| @@ -153,6 +157,7 @@ | |||
| 153 | is still available. This leads to a kind of duality for the time | 157 | is still available. This leads to a kind of duality for the time |
| 154 | being. Over time the new model should be used in more and more | 158 | being. Over time the new model should be used in more and more |
| 155 | architectures, as it enables smaller and cleaner IRQ subsystems. | 159 | architectures, as it enables smaller and cleaner IRQ subsystems. |
| 160 | It's deprecated for three years now and about to be removed. | ||
| 156 | </para> | 161 | </para> |
| 157 | </chapter> | 162 | </chapter> |
| 158 | <chapter id="bugs"> | 163 | <chapter id="bugs"> |
| @@ -217,6 +222,7 @@ | |||
| 217 | <itemizedlist> | 222 | <itemizedlist> |
| 218 | <listitem><para>handle_level_irq</para></listitem> | 223 | <listitem><para>handle_level_irq</para></listitem> |
| 219 | <listitem><para>handle_edge_irq</para></listitem> | 224 | <listitem><para>handle_edge_irq</para></listitem> |
| 225 | <listitem><para>handle_fasteoi_irq</para></listitem> | ||
| 220 | <listitem><para>handle_simple_irq</para></listitem> | 226 | <listitem><para>handle_simple_irq</para></listitem> |
| 221 | <listitem><para>handle_percpu_irq</para></listitem> | 227 | <listitem><para>handle_percpu_irq</para></listitem> |
| 222 | </itemizedlist> | 228 | </itemizedlist> |
| @@ -233,33 +239,33 @@ | |||
| 233 | are used by the default flow implementations. | 239 | are used by the default flow implementations. |
| 234 | The following helper functions are implemented (simplified excerpt): | 240 | The following helper functions are implemented (simplified excerpt): |
| 235 | <programlisting> | 241 | <programlisting> |
| 236 | default_enable(irq) | 242 | default_enable(struct irq_data *data) |
| 237 | { | 243 | { |
| 238 | desc->chip->unmask(irq); | 244 | desc->chip->irq_unmask(data); |
| 239 | } | 245 | } |
| 240 | 246 | ||
| 241 | default_disable(irq) | 247 | default_disable(struct irq_data *data) |
| 242 | { | 248 | { |
| 243 | if (!delay_disable(irq)) | 249 | if (!delay_disable(data)) |
| 244 | desc->chip->mask(irq); | 250 | desc->chip->irq_mask(data); |
| 245 | } | 251 | } |
| 246 | 252 | ||
| 247 | default_ack(irq) | 253 | default_ack(struct irq_data *data) |
| 248 | { | 254 | { |
| 249 | chip->ack(irq); | 255 | chip->irq_ack(data); |
| 250 | } | 256 | } |
| 251 | 257 | ||
| 252 | default_mask_ack(irq) | 258 | default_mask_ack(struct irq_data *data) |
| 253 | { | 259 | { |
| 254 | if (chip->mask_ack) { | 260 | if (chip->irq_mask_ack) { |
| 255 | chip->mask_ack(irq); | 261 | chip->irq_mask_ack(data); |
| 256 | } else { | 262 | } else { |
| 257 | chip->mask(irq); | 263 | chip->irq_mask(data); |
| 258 | chip->ack(irq); | 264 | chip->irq_ack(data); |
| 259 | } | 265 | } |
| 260 | } | 266 | } |
| 261 | 267 | ||
| 262 | noop(irq) | 268 | noop(struct irq_data *data)) |
| 263 | { | 269 | { |
| 264 | } | 270 | } |
| 265 | 271 | ||
| @@ -278,12 +284,27 @@ noop(irq) | |||
| 278 | <para> | 284 | <para> |
| 279 | The following control flow is implemented (simplified excerpt): | 285 | The following control flow is implemented (simplified excerpt): |
| 280 | <programlisting> | 286 | <programlisting> |
| 281 | desc->chip->start(); | 287 | desc->chip->irq_mask(); |
| 282 | handle_IRQ_event(desc->action); | 288 | handle_IRQ_event(desc->action); |
| 283 | desc->chip->end(); | 289 | desc->chip->irq_unmask(); |
| 284 | </programlisting> | 290 | </programlisting> |
| 285 | </para> | 291 | </para> |
| 286 | </sect3> | 292 | </sect3> |
| 293 | <sect3 id="Default_FASTEOI_IRQ_flow_handler"> | ||
| 294 | <title>Default Fast EOI IRQ flow handler</title> | ||
| 295 | <para> | ||
| 296 | handle_fasteoi_irq provides a generic implementation | ||
| 297 | for interrupts, which only need an EOI at the end of | ||
| 298 | the handler | ||
| 299 | </para> | ||
| 300 | <para> | ||
| 301 | The following control flow is implemented (simplified excerpt): | ||
| 302 | <programlisting> | ||
| 303 | handle_IRQ_event(desc->action); | ||
| 304 | desc->chip->irq_eoi(); | ||
| 305 | </programlisting> | ||
| 306 | </para> | ||
| 307 | </sect3> | ||
| 287 | <sect3 id="Default_Edge_IRQ_flow_handler"> | 308 | <sect3 id="Default_Edge_IRQ_flow_handler"> |
| 288 | <title>Default Edge IRQ flow handler</title> | 309 | <title>Default Edge IRQ flow handler</title> |
| 289 | <para> | 310 | <para> |
| @@ -294,20 +315,19 @@ desc->chip->end(); | |||
| 294 | The following control flow is implemented (simplified excerpt): | 315 | The following control flow is implemented (simplified excerpt): |
| 295 | <programlisting> | 316 | <programlisting> |
| 296 | if (desc->status & running) { | 317 | if (desc->status & running) { |
| 297 | desc->chip->hold(); | 318 | desc->chip->irq_mask(); |
| 298 | desc->status |= pending | masked; | 319 | desc->status |= pending | masked; |
| 299 | return; | 320 | return; |
| 300 | } | 321 | } |
| 301 | desc->chip->start(); | 322 | desc->chip->irq_ack(); |
| 302 | desc->status |= running; | 323 | desc->status |= running; |
| 303 | do { | 324 | do { |
| 304 | if (desc->status & masked) | 325 | if (desc->status & masked) |
| 305 | desc->chip->enable(); | 326 | desc->chip->irq_unmask(); |
| 306 | desc->status &= ~pending; | 327 | desc->status &= ~pending; |
| 307 | handle_IRQ_event(desc->action); | 328 | handle_IRQ_event(desc->action); |
| 308 | } while (status & pending); | 329 | } while (status & pending); |
| 309 | desc->status &= ~running; | 330 | desc->status &= ~running; |
| 310 | desc->chip->end(); | ||
| 311 | </programlisting> | 331 | </programlisting> |
| 312 | </para> | 332 | </para> |
| 313 | </sect3> | 333 | </sect3> |
| @@ -342,9 +362,9 @@ handle_IRQ_event(desc->action); | |||
| 342 | <para> | 362 | <para> |
| 343 | The following control flow is implemented (simplified excerpt): | 363 | The following control flow is implemented (simplified excerpt): |
| 344 | <programlisting> | 364 | <programlisting> |
| 345 | desc->chip->start(); | ||
| 346 | handle_IRQ_event(desc->action); | 365 | handle_IRQ_event(desc->action); |
| 347 | desc->chip->end(); | 366 | if (desc->chip->irq_eoi) |
| 367 | desc->chip->irq_eoi(); | ||
| 348 | </programlisting> | 368 | </programlisting> |
| 349 | </para> | 369 | </para> |
| 350 | </sect3> | 370 | </sect3> |
| @@ -375,8 +395,7 @@ desc->chip->end(); | |||
| 375 | mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when | 395 | mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when |
| 376 | you want to use the delayed interrupt disable feature and your | 396 | you want to use the delayed interrupt disable feature and your |
| 377 | hardware is not capable of retriggering an interrupt.) | 397 | hardware is not capable of retriggering an interrupt.) |
| 378 | The delayed interrupt disable can be runtime enabled, per interrupt, | 398 | The delayed interrupt disable is not configurable. |
| 379 | by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field. | ||
| 380 | </para> | 399 | </para> |
| 381 | </sect2> | 400 | </sect2> |
| 382 | </sect1> | 401 | </sect1> |
| @@ -387,13 +406,13 @@ desc->chip->end(); | |||
| 387 | contains all the direct chip relevant functions, which | 406 | contains all the direct chip relevant functions, which |
| 388 | can be utilized by the irq flow implementations. | 407 | can be utilized by the irq flow implementations. |
| 389 | <itemizedlist> | 408 | <itemizedlist> |
| 390 | <listitem><para>ack()</para></listitem> | 409 | <listitem><para>irq_ack()</para></listitem> |
| 391 | <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem> | 410 | <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> |
| 392 | <listitem><para>mask()</para></listitem> | 411 | <listitem><para>irq_mask()</para></listitem> |
| 393 | <listitem><para>unmask()</para></listitem> | 412 | <listitem><para>irq_unmask()</para></listitem> |
| 394 | <listitem><para>retrigger() - Optional</para></listitem> | 413 | <listitem><para>irq_retrigger() - Optional</para></listitem> |
| 395 | <listitem><para>set_type() - Optional</para></listitem> | 414 | <listitem><para>irq_set_type() - Optional</para></listitem> |
| 396 | <listitem><para>set_wake() - Optional</para></listitem> | 415 | <listitem><para>irq_set_wake() - Optional</para></listitem> |
| 397 | </itemizedlist> | 416 | </itemizedlist> |
| 398 | These primitives are strictly intended to mean what they say: ack means | 417 | These primitives are strictly intended to mean what they say: ack means |
| 399 | ACK, masking means masking of an IRQ line, etc. It is up to the flow | 418 | ACK, masking means masking of an IRQ line, etc. It is up to the flow |
| @@ -458,6 +477,7 @@ desc->chip->end(); | |||
| 458 | <para> | 477 | <para> |
| 459 | This chapter contains the autogenerated documentation of the internal functions. | 478 | This chapter contains the autogenerated documentation of the internal functions. |
| 460 | </para> | 479 | </para> |
| 480 | !Ikernel/irq/irqdesc.c | ||
| 461 | !Ikernel/irq/handle.c | 481 | !Ikernel/irq/handle.c |
| 462 | !Ikernel/irq/chip.c | 482 | !Ikernel/irq/chip.c |
| 463 | </chapter> | 483 | </chapter> |
diff --git a/MAINTAINERS b/MAINTAINERS index 3d4179fbc52..6f5b5b2b528 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -3241,6 +3241,12 @@ F: drivers/net/irda/ | |||
| 3241 | F: include/net/irda/ | 3241 | F: include/net/irda/ |
| 3242 | F: net/irda/ | 3242 | F: net/irda/ |
| 3243 | 3243 | ||
| 3244 | IRQ SUBSYSTEM | ||
| 3245 | M: Thomas Gleixner <tglx@linutronix.de> | ||
| 3246 | S: Maintained | ||
| 3247 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core | ||
| 3248 | F: kernel/irq/ | ||
| 3249 | |||
| 3244 | ISAPNP | 3250 | ISAPNP |
| 3245 | M: Jaroslav Kysela <perex@perex.cz> | 3251 | M: Jaroslav Kysela <perex@perex.cz> |
| 3246 | S: Maintained | 3252 | S: Maintained |
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 90831f6f5f5..5586b7c8ef6 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h | |||
| @@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags); | |||
| 24 | #define IRQF_PROBE (1 << 1) | 24 | #define IRQF_PROBE (1 << 1) |
| 25 | #define IRQF_NOAUTOEN (1 << 2) | 25 | #define IRQF_NOAUTOEN (1 << 2) |
| 26 | 26 | ||
| 27 | #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) | ||
| 28 | |||
| 27 | #endif | 29 | #endif |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c0d5c3b3a76..36ad3be4692 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
| @@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) | |||
| 154 | 154 | ||
| 155 | void __init init_IRQ(void) | 155 | void __init init_IRQ(void) |
| 156 | { | 156 | { |
| 157 | struct irq_desc *desc; | ||
| 158 | int irq; | ||
| 159 | |||
| 160 | for (irq = 0; irq < nr_irqs; irq++) { | ||
| 161 | desc = irq_to_desc_alloc_node(irq, 0); | ||
| 162 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; | ||
| 163 | } | ||
| 164 | |||
| 165 | init_arch_irq(); | 157 | init_arch_irq(); |
| 166 | } | 158 | } |
| 167 | 159 | ||
| @@ -169,7 +161,7 @@ void __init init_IRQ(void) | |||
| 169 | int __init arch_probe_nr_irqs(void) | 161 | int __init arch_probe_nr_irqs(void) |
| 170 | { | 162 | { |
| 171 | nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; | 163 | nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; |
| 172 | return 0; | 164 | return nr_irqs; |
| 173 | } | 165 | } |
| 174 | #endif | 166 | #endif |
| 175 | 167 | ||
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c index dc1c4939b0c..e3152631eb3 100644 --- a/arch/arm/mach-bcmring/irq.c +++ b/arch/arm/mach-bcmring/irq.c | |||
| @@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq) | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static struct irq_chip bcmring_irq0_chip = { | 69 | static struct irq_chip bcmring_irq0_chip = { |
| 70 | .typename = "ARM-INTC0", | 70 | .name = "ARM-INTC0", |
| 71 | .ack = bcmring_mask_irq0, | 71 | .ack = bcmring_mask_irq0, |
| 72 | .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ | 72 | .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ |
| 73 | .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ | 73 | .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | static struct irq_chip bcmring_irq1_chip = { | 76 | static struct irq_chip bcmring_irq1_chip = { |
| 77 | .typename = "ARM-INTC1", | 77 | .name = "ARM-INTC1", |
| 78 | .ack = bcmring_mask_irq1, | 78 | .ack = bcmring_mask_irq1, |
| 79 | .mask = bcmring_mask_irq1, | 79 | .mask = bcmring_mask_irq1, |
| 80 | .unmask = bcmring_unmask_irq1, | 80 | .unmask = bcmring_unmask_irq1, |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | static struct irq_chip bcmring_irq2_chip = { | 83 | static struct irq_chip bcmring_irq2_chip = { |
| 84 | .typename = "ARM-SINTC", | 84 | .name = "ARM-SINTC", |
| 85 | .ack = bcmring_mask_irq2, | 85 | .ack = bcmring_mask_irq2, |
| 86 | .mask = bcmring_mask_irq2, | 86 | .mask = bcmring_mask_irq2, |
| 87 | .unmask = bcmring_unmask_irq2, | 87 | .unmask = bcmring_unmask_irq2, |
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index f34b0ed8063..7149fcc16c8 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c | |||
| @@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq) | |||
| 164 | static struct irq_chip iop13xx_msi_chip = { | 164 | static struct irq_chip iop13xx_msi_chip = { |
| 165 | .name = "PCI-MSI", | 165 | .name = "PCI-MSI", |
| 166 | .ack = iop13xx_msi_nop, | 166 | .ack = iop13xx_msi_nop, |
| 167 | .enable = unmask_msi_irq, | 167 | .irq_enable = unmask_msi_irq, |
| 168 | .disable = mask_msi_irq, | 168 | .irq_disable = mask_msi_irq, |
| 169 | .mask = mask_msi_irq, | 169 | .irq_mask = mask_msi_irq, |
| 170 | .unmask = unmask_msi_irq, | 170 | .irq_unmask = unmask_msi_irq, |
| 171 | }; | 171 | }; |
| 172 | 172 | ||
| 173 | int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | 173 | int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 4a746ea838f..00b19a416ea 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
| @@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq) | |||
| 104 | */ | 104 | */ |
| 105 | static struct irq_chip ia64_msi_chip = { | 105 | static struct irq_chip ia64_msi_chip = { |
| 106 | .name = "PCI-MSI", | 106 | .name = "PCI-MSI", |
| 107 | .mask = mask_msi_irq, | 107 | .irq_mask = mask_msi_irq, |
| 108 | .unmask = unmask_msi_irq, | 108 | .irq_unmask = unmask_msi_irq, |
| 109 | .ack = ia64_ack_msi_irq, | 109 | .ack = ia64_ack_msi_irq, |
| 110 | #ifdef CONFIG_SMP | 110 | #ifdef CONFIG_SMP |
| 111 | .set_affinity = ia64_set_msi_irq_affinity, | 111 | .set_affinity = ia64_set_msi_irq_affinity, |
| @@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 160 | 160 | ||
| 161 | static struct irq_chip dmar_msi_type = { | 161 | static struct irq_chip dmar_msi_type = { |
| 162 | .name = "DMAR_MSI", | 162 | .name = "DMAR_MSI", |
| 163 | .unmask = dmar_msi_unmask, | 163 | .irq_unmask = dmar_msi_unmask, |
| 164 | .mask = dmar_msi_mask, | 164 | .irq_mask = dmar_msi_mask, |
| 165 | .ack = ia64_ack_msi_irq, | 165 | .ack = ia64_ack_msi_irq, |
| 166 | #ifdef CONFIG_SMP | 166 | #ifdef CONFIG_SMP |
| 167 | .set_affinity = dmar_msi_set_affinity, | 167 | .set_affinity = dmar_msi_set_affinity, |
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 0c72dd46383..a5e500f0285 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
| @@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq) | |||
| 228 | 228 | ||
| 229 | static struct irq_chip sn_msi_chip = { | 229 | static struct irq_chip sn_msi_chip = { |
| 230 | .name = "PCI-MSI", | 230 | .name = "PCI-MSI", |
| 231 | .mask = mask_msi_irq, | 231 | .irq_mask = mask_msi_irq, |
| 232 | .unmask = unmask_msi_irq, | 232 | .irq_unmask = unmask_msi_irq, |
| 233 | .ack = sn_ack_msi_irq, | 233 | .ack = sn_ack_msi_irq, |
| 234 | #ifdef CONFIG_SMP | 234 | #ifdef CONFIG_SMP |
| 235 | .set_affinity = sn_set_msi_irq_affinity, | 235 | .set_affinity = sn_set_msi_irq_affinity, |
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 3c71f776872..7db26f1f082 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c | |||
| @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 51 | for_each_online_cpu(j) | 51 | for_each_online_cpu(j) |
| 52 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 52 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 53 | #endif | 53 | #endif |
| 54 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 54 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
| 55 | seq_printf(p, " %s", action->name); | 55 | seq_printf(p, " %s", action->name); |
| 56 | 56 | ||
| 57 | for (action=action->next; action; action = action->next) | 57 | for (action=action->next; action; action = action->next) |
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c index 922fdfdadea..402a59d7219 100644 --- a/arch/m32r/platforms/m32104ut/setup.c +++ b/arch/m32r/platforms/m32104ut/setup.c | |||
| @@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq) | |||
| 65 | 65 | ||
| 66 | static struct irq_chip m32104ut_irq_type = | 66 | static struct irq_chip m32104ut_irq_type = |
| 67 | { | 67 | { |
| 68 | .typename = "M32104UT-IRQ", | 68 | .name = "M32104UT-IRQ", |
| 69 | .startup = startup_m32104ut_irq, | 69 | .startup = startup_m32104ut_irq, |
| 70 | .shutdown = shutdown_m32104ut_irq, | 70 | .shutdown = shutdown_m32104ut_irq, |
| 71 | .enable = enable_m32104ut_irq, | 71 | .enable = enable_m32104ut_irq, |
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c index 9c1bc7487c1..80b1a026795 100644 --- a/arch/m32r/platforms/m32700ut/setup.c +++ b/arch/m32r/platforms/m32700ut/setup.c | |||
| @@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq) | |||
| 71 | 71 | ||
| 72 | static struct irq_chip m32700ut_irq_type = | 72 | static struct irq_chip m32700ut_irq_type = |
| 73 | { | 73 | { |
| 74 | .typename = "M32700UT-IRQ", | 74 | .name = "M32700UT-IRQ", |
| 75 | .startup = startup_m32700ut_irq, | 75 | .startup = startup_m32700ut_irq, |
| 76 | .shutdown = shutdown_m32700ut_irq, | 76 | .shutdown = shutdown_m32700ut_irq, |
| 77 | .enable = enable_m32700ut_irq, | 77 | .enable = enable_m32700ut_irq, |
| @@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) | |||
| 148 | 148 | ||
| 149 | static struct irq_chip m32700ut_pld_irq_type = | 149 | static struct irq_chip m32700ut_pld_irq_type = |
| 150 | { | 150 | { |
| 151 | .typename = "M32700UT-PLD-IRQ", | 151 | .name = "M32700UT-PLD-IRQ", |
| 152 | .startup = startup_m32700ut_pld_irq, | 152 | .startup = startup_m32700ut_pld_irq, |
| 153 | .shutdown = shutdown_m32700ut_pld_irq, | 153 | .shutdown = shutdown_m32700ut_pld_irq, |
| 154 | .enable = enable_m32700ut_pld_irq, | 154 | .enable = enable_m32700ut_pld_irq, |
| @@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq) | |||
| 217 | 217 | ||
| 218 | static struct irq_chip m32700ut_lanpld_irq_type = | 218 | static struct irq_chip m32700ut_lanpld_irq_type = |
| 219 | { | 219 | { |
| 220 | .typename = "M32700UT-PLD-LAN-IRQ", | 220 | .name = "M32700UT-PLD-LAN-IRQ", |
| 221 | .startup = startup_m32700ut_lanpld_irq, | 221 | .startup = startup_m32700ut_lanpld_irq, |
| 222 | .shutdown = shutdown_m32700ut_lanpld_irq, | 222 | .shutdown = shutdown_m32700ut_lanpld_irq, |
| 223 | .enable = enable_m32700ut_lanpld_irq, | 223 | .enable = enable_m32700ut_lanpld_irq, |
| @@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) | |||
| 286 | 286 | ||
| 287 | static struct irq_chip m32700ut_lcdpld_irq_type = | 287 | static struct irq_chip m32700ut_lcdpld_irq_type = |
| 288 | { | 288 | { |
| 289 | .typename = "M32700UT-PLD-LCD-IRQ", | 289 | .name = "M32700UT-PLD-LCD-IRQ", |
| 290 | .startup = startup_m32700ut_lcdpld_irq, | 290 | .startup = startup_m32700ut_lcdpld_irq, |
| 291 | .shutdown = shutdown_m32700ut_lcdpld_irq, | 291 | .shutdown = shutdown_m32700ut_lcdpld_irq, |
| 292 | .enable = enable_m32700ut_lcdpld_irq, | 292 | .enable = enable_m32700ut_lcdpld_irq, |
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c index fb4b17799b6..ea00c84d6b1 100644 --- a/arch/m32r/platforms/mappi/setup.c +++ b/arch/m32r/platforms/mappi/setup.c | |||
| @@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq) | |||
| 65 | 65 | ||
| 66 | static struct irq_chip mappi_irq_type = | 66 | static struct irq_chip mappi_irq_type = |
| 67 | { | 67 | { |
| 68 | .typename = "MAPPI-IRQ", | 68 | .name = "MAPPI-IRQ", |
| 69 | .startup = startup_mappi_irq, | 69 | .startup = startup_mappi_irq, |
| 70 | .shutdown = shutdown_mappi_irq, | 70 | .shutdown = shutdown_mappi_irq, |
| 71 | .enable = enable_mappi_irq, | 71 | .enable = enable_mappi_irq, |
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c index 6a65eda0a05..c049376d027 100644 --- a/arch/m32r/platforms/mappi2/setup.c +++ b/arch/m32r/platforms/mappi2/setup.c | |||
| @@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq) | |||
| 72 | 72 | ||
| 73 | static struct irq_chip mappi2_irq_type = | 73 | static struct irq_chip mappi2_irq_type = |
| 74 | { | 74 | { |
| 75 | .typename = "MAPPI2-IRQ", | 75 | .name = "MAPPI2-IRQ", |
| 76 | .startup = startup_mappi2_irq, | 76 | .startup = startup_mappi2_irq, |
| 77 | .shutdown = shutdown_mappi2_irq, | 77 | .shutdown = shutdown_mappi2_irq, |
| 78 | .enable = enable_mappi2_irq, | 78 | .enable = enable_mappi2_irq, |
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c index 9c337aeac94..882de25c6e8 100644 --- a/arch/m32r/platforms/mappi3/setup.c +++ b/arch/m32r/platforms/mappi3/setup.c | |||
| @@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq) | |||
| 72 | 72 | ||
| 73 | static struct irq_chip mappi3_irq_type = | 73 | static struct irq_chip mappi3_irq_type = |
| 74 | { | 74 | { |
| 75 | .typename = "MAPPI3-IRQ", | 75 | .name = "MAPPI3-IRQ", |
| 76 | .startup = startup_mappi3_irq, | 76 | .startup = startup_mappi3_irq, |
| 77 | .shutdown = shutdown_mappi3_irq, | 77 | .shutdown = shutdown_mappi3_irq, |
| 78 | .enable = enable_mappi3_irq, | 78 | .enable = enable_mappi3_irq, |
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c index ed865741c38..d11d93bf74f 100644 --- a/arch/m32r/platforms/oaks32r/setup.c +++ b/arch/m32r/platforms/oaks32r/setup.c | |||
| @@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq) | |||
| 63 | 63 | ||
| 64 | static struct irq_chip oaks32r_irq_type = | 64 | static struct irq_chip oaks32r_irq_type = |
| 65 | { | 65 | { |
| 66 | .typename = "OAKS32R-IRQ", | 66 | .name = "OAKS32R-IRQ", |
| 67 | .startup = startup_oaks32r_irq, | 67 | .startup = startup_oaks32r_irq, |
| 68 | .shutdown = shutdown_oaks32r_irq, | 68 | .shutdown = shutdown_oaks32r_irq, |
| 69 | .enable = enable_oaks32r_irq, | 69 | .enable = enable_oaks32r_irq, |
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c index 80d68065701..5f3402a2fba 100644 --- a/arch/m32r/platforms/opsput/setup.c +++ b/arch/m32r/platforms/opsput/setup.c | |||
| @@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq) | |||
| 72 | 72 | ||
| 73 | static struct irq_chip opsput_irq_type = | 73 | static struct irq_chip opsput_irq_type = |
| 74 | { | 74 | { |
| 75 | .typename = "OPSPUT-IRQ", | 75 | .name = "OPSPUT-IRQ", |
| 76 | .startup = startup_opsput_irq, | 76 | .startup = startup_opsput_irq, |
| 77 | .shutdown = shutdown_opsput_irq, | 77 | .shutdown = shutdown_opsput_irq, |
| 78 | .enable = enable_opsput_irq, | 78 | .enable = enable_opsput_irq, |
| @@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq) | |||
| 149 | 149 | ||
| 150 | static struct irq_chip opsput_pld_irq_type = | 150 | static struct irq_chip opsput_pld_irq_type = |
| 151 | { | 151 | { |
| 152 | .typename = "OPSPUT-PLD-IRQ", | 152 | .name = "OPSPUT-PLD-IRQ", |
| 153 | .startup = startup_opsput_pld_irq, | 153 | .startup = startup_opsput_pld_irq, |
| 154 | .shutdown = shutdown_opsput_pld_irq, | 154 | .shutdown = shutdown_opsput_pld_irq, |
| 155 | .enable = enable_opsput_pld_irq, | 155 | .enable = enable_opsput_pld_irq, |
| @@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq) | |||
| 218 | 218 | ||
| 219 | static struct irq_chip opsput_lanpld_irq_type = | 219 | static struct irq_chip opsput_lanpld_irq_type = |
| 220 | { | 220 | { |
| 221 | .typename = "OPSPUT-PLD-LAN-IRQ", | 221 | .name = "OPSPUT-PLD-LAN-IRQ", |
| 222 | .startup = startup_opsput_lanpld_irq, | 222 | .startup = startup_opsput_lanpld_irq, |
| 223 | .shutdown = shutdown_opsput_lanpld_irq, | 223 | .shutdown = shutdown_opsput_lanpld_irq, |
| 224 | .enable = enable_opsput_lanpld_irq, | 224 | .enable = enable_opsput_lanpld_irq, |
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c index 757302660af..1beac7a51ed 100644 --- a/arch/m32r/platforms/usrv/setup.c +++ b/arch/m32r/platforms/usrv/setup.c | |||
| @@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq) | |||
| 63 | 63 | ||
| 64 | static struct irq_chip mappi_irq_type = | 64 | static struct irq_chip mappi_irq_type = |
| 65 | { | 65 | { |
| 66 | .typename = "M32700-IRQ", | 66 | .name = "M32700-IRQ", |
| 67 | .startup = startup_mappi_irq, | 67 | .startup = startup_mappi_irq, |
| 68 | .shutdown = shutdown_mappi_irq, | 68 | .shutdown = shutdown_mappi_irq, |
| 69 | .enable = enable_mappi_irq, | 69 | .enable = enable_mappi_irq, |
| @@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) | |||
| 136 | 136 | ||
| 137 | static struct irq_chip m32700ut_pld_irq_type = | 137 | static struct irq_chip m32700ut_pld_irq_type = |
| 138 | { | 138 | { |
| 139 | .typename = "USRV-PLD-IRQ", | 139 | .name = "USRV-PLD-IRQ", |
| 140 | .startup = startup_m32700ut_pld_irq, | 140 | .startup = startup_m32700ut_pld_irq, |
| 141 | .shutdown = shutdown_m32700ut_pld_irq, | 141 | .shutdown = shutdown_m32700ut_pld_irq, |
| 142 | .enable = enable_m32700ut_pld_irq, | 142 | .enable = enable_m32700ut_pld_irq, |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 97085530aa6..e3e379c6caa 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
| @@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) | |||
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | static struct irq_chip msic_irq_chip = { | 312 | static struct irq_chip msic_irq_chip = { |
| 313 | .mask = mask_msi_irq, | 313 | .irq_mask = mask_msi_irq, |
| 314 | .unmask = unmask_msi_irq, | 314 | .irq_unmask = unmask_msi_irq, |
| 315 | .shutdown = unmask_msi_irq, | 315 | .irq_shutdown = mask_msi_irq, |
| 316 | .name = "AXON-MSI", | 316 | .name = "AXON-MSI", |
| 317 | }; | 317 | }; |
| 318 | 318 | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 93834b0d827..67e2c4bdac8 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
| @@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq) | |||
| 243 | * at that level, so we do it here by hand. | 243 | * at that level, so we do it here by hand. |
| 244 | */ | 244 | */ |
| 245 | if (irq_to_desc(virq)->msi_desc) | 245 | if (irq_to_desc(virq)->msi_desc) |
| 246 | unmask_msi_irq(virq); | 246 | unmask_msi_irq(irq_get_irq_data(virq)); |
| 247 | 247 | ||
| 248 | /* unmask it */ | 248 | /* unmask it */ |
| 249 | xics_unmask_irq(virq); | 249 | xics_unmask_irq(virq); |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 87991d3abba..bdbd896c89d 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
| @@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq) | |||
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static struct irq_chip fsl_msi_chip = { | 53 | static struct irq_chip fsl_msi_chip = { |
| 54 | .mask = mask_msi_irq, | 54 | .irq_mask = mask_msi_irq, |
| 55 | .unmask = unmask_msi_irq, | 55 | .irq_unmask = unmask_msi_irq, |
| 56 | .ack = fsl_msi_end_irq, | 56 | .ack = fsl_msi_end_irq, |
| 57 | .name = "FSL-MSI", | 57 | .name = "FSL-MSI", |
| 58 | }; | 58 | }; |
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 3b6a9a43718..320ad5a9a25 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c | |||
| @@ -39,24 +39,24 @@ | |||
| 39 | static struct mpic *msi_mpic; | 39 | static struct mpic *msi_mpic; |
| 40 | 40 | ||
| 41 | 41 | ||
| 42 | static void mpic_pasemi_msi_mask_irq(unsigned int irq) | 42 | static void mpic_pasemi_msi_mask_irq(struct irq_data *data) |
| 43 | { | 43 | { |
| 44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); | 44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); |
| 45 | mask_msi_irq(irq); | 45 | mask_msi_irq(data); |
| 46 | mpic_mask_irq(irq); | 46 | mpic_mask_irq(data->irq); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static void mpic_pasemi_msi_unmask_irq(unsigned int irq) | 49 | static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) |
| 50 | { | 50 | { |
| 51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); | 51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); |
| 52 | mpic_unmask_irq(irq); | 52 | mpic_unmask_irq(data->irq); |
| 53 | unmask_msi_irq(irq); | 53 | unmask_msi_irq(data); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | static struct irq_chip mpic_pasemi_msi_chip = { | 56 | static struct irq_chip mpic_pasemi_msi_chip = { |
| 57 | .shutdown = mpic_pasemi_msi_mask_irq, | 57 | .irq_shutdown = mpic_pasemi_msi_mask_irq, |
| 58 | .mask = mpic_pasemi_msi_mask_irq, | 58 | .irq_mask = mpic_pasemi_msi_mask_irq, |
| 59 | .unmask = mpic_pasemi_msi_unmask_irq, | 59 | .irq_unmask = mpic_pasemi_msi_unmask_irq, |
| 60 | .eoi = mpic_end_irq, | 60 | .eoi = mpic_end_irq, |
| 61 | .set_type = mpic_set_irq_type, | 61 | .set_type = mpic_set_irq_type, |
| 62 | .set_affinity = mpic_set_affinity, | 62 | .set_affinity = mpic_set_affinity, |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index bcbfe79c704..a2b028b4a20 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
| @@ -23,22 +23,22 @@ | |||
| 23 | /* A bit ugly, can we get this from the pci_dev somehow? */ | 23 | /* A bit ugly, can we get this from the pci_dev somehow? */ |
| 24 | static struct mpic *msi_mpic; | 24 | static struct mpic *msi_mpic; |
| 25 | 25 | ||
| 26 | static void mpic_u3msi_mask_irq(unsigned int irq) | 26 | static void mpic_u3msi_mask_irq(struct irq_data *data) |
| 27 | { | 27 | { |
| 28 | mask_msi_irq(irq); | 28 | mask_msi_irq(data); |
| 29 | mpic_mask_irq(irq); | 29 | mpic_mask_irq(data->irq); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | static void mpic_u3msi_unmask_irq(unsigned int irq) | 32 | static void mpic_u3msi_unmask_irq(struct irq_data *data) |
| 33 | { | 33 | { |
| 34 | mpic_unmask_irq(irq); | 34 | mpic_unmask_irq(data->irq); |
| 35 | unmask_msi_irq(irq); | 35 | unmask_msi_irq(data); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | static struct irq_chip mpic_u3msi_chip = { | 38 | static struct irq_chip mpic_u3msi_chip = { |
| 39 | .shutdown = mpic_u3msi_mask_irq, | 39 | .irq_shutdown = mpic_u3msi_mask_irq, |
| 40 | .mask = mpic_u3msi_mask_irq, | 40 | .irq_mask = mpic_u3msi_mask_irq, |
| 41 | .unmask = mpic_u3msi_unmask_irq, | 41 | .irq_unmask = mpic_u3msi_unmask_irq, |
| 42 | .eoi = mpic_end_irq, | 42 | .eoi = mpic_end_irq, |
| 43 | .set_type = mpic_set_irq_type, | 43 | .set_type = mpic_set_irq_type, |
| 44 | .set_affinity = mpic_set_affinity, | 44 | .set_affinity = mpic_set_affinity, |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 257de1f0692..ae5bac39b89 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
| @@ -290,7 +290,7 @@ void __init init_IRQ(void) | |||
| 290 | int __init arch_probe_nr_irqs(void) | 290 | int __init arch_probe_nr_irqs(void) |
| 291 | { | 291 | { |
| 292 | nr_irqs = sh_mv.mv_nr_irqs; | 292 | nr_irqs = sh_mv.mv_nr_irqs; |
| 293 | return 0; | 293 | return NR_IRQS_LEGACY; |
| 294 | } | 294 | } |
| 295 | #endif | 295 | #endif |
| 296 | 296 | ||
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 548b8ca9c21..b210416ace7 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c | |||
| @@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) | |||
| 114 | 114 | ||
| 115 | static struct irq_chip msi_irq = { | 115 | static struct irq_chip msi_irq = { |
| 116 | .name = "PCI-MSI", | 116 | .name = "PCI-MSI", |
| 117 | .mask = mask_msi_irq, | 117 | .irq_mask = mask_msi_irq, |
| 118 | .unmask = unmask_msi_irq, | 118 | .irq_unmask = unmask_msi_irq, |
| 119 | .enable = unmask_msi_irq, | 119 | .irq_enable = unmask_msi_irq, |
| 120 | .disable = mask_msi_irq, | 120 | .irq_disable = mask_msi_irq, |
| 121 | /* XXX affinity XXX */ | 121 | /* XXX affinity XXX */ |
| 122 | }; | 122 | }; |
| 123 | 123 | ||
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 596c6008693..9a27d563fc3 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
| @@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq) | |||
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | static struct irq_chip tile_irq_chip = { | 210 | static struct irq_chip tile_irq_chip = { |
| 211 | .typename = "tile_irq_chip", | 211 | .name = "tile_irq_chip", |
| 212 | .ack = tile_irq_chip_ack, | 212 | .ack = tile_irq_chip_ack, |
| 213 | .eoi = tile_irq_chip_eoi, | 213 | .eoi = tile_irq_chip_eoi, |
| 214 | .mask = tile_irq_chip_mask, | 214 | .mask = tile_irq_chip_mask, |
| @@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 288 | for_each_online_cpu(j) | 288 | for_each_online_cpu(j) |
| 289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 290 | #endif | 290 | #endif |
| 291 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 291 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
| 292 | seq_printf(p, " %s", action->name); | 292 | seq_printf(p, " %s", action->name); |
| 293 | 293 | ||
| 294 | for (action = action->next; action; action = action->next) | 294 | for (action = action->next; action; action = action->next) |
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index a3f0b04d710..a746e3037a5 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c | |||
| @@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 46 | for_each_online_cpu(j) | 46 | for_each_online_cpu(j) |
| 47 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 47 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 48 | #endif | 48 | #endif |
| 49 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 49 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
| 50 | seq_printf(p, " %s", action->name); | 50 | seq_printf(p, " %s", action->name); |
| 51 | 51 | ||
| 52 | for (action=action->next; action; action = action->next) | 52 | for (action=action->next; action; action = action->next) |
| @@ -369,7 +369,7 @@ static void dummy(unsigned int irq) | |||
| 369 | 369 | ||
| 370 | /* This is used for everything else than the timer. */ | 370 | /* This is used for everything else than the timer. */ |
| 371 | static struct irq_chip normal_irq_type = { | 371 | static struct irq_chip normal_irq_type = { |
| 372 | .typename = "SIGIO", | 372 | .name = "SIGIO", |
| 373 | .release = free_irq_by_irq_and_dev, | 373 | .release = free_irq_by_irq_and_dev, |
| 374 | .disable = dummy, | 374 | .disable = dummy, |
| 375 | .enable = dummy, | 375 | .enable = dummy, |
| @@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = { | |||
| 378 | }; | 378 | }; |
| 379 | 379 | ||
| 380 | static struct irq_chip SIGVTALRM_irq_type = { | 380 | static struct irq_chip SIGVTALRM_irq_type = { |
| 381 | .typename = "SIGVTALRM", | 381 | .name = "SIGVTALRM", |
| 382 | .release = free_irq_by_irq_and_dev, | 382 | .release = free_irq_by_irq_and_dev, |
| 383 | .shutdown = dummy, /* never called */ | 383 | .shutdown = dummy, /* never called */ |
| 384 | .disable = dummy, | 384 | .disable = dummy, |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8c9e609a175..7ab9db88ab6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -63,6 +63,10 @@ config X86 | |||
| 63 | select HAVE_USER_RETURN_NOTIFIER | 63 | select HAVE_USER_RETURN_NOTIFIER |
| 64 | select HAVE_ARCH_JUMP_LABEL | 64 | select HAVE_ARCH_JUMP_LABEL |
| 65 | select HAVE_TEXT_POKE_SMP | 65 | select HAVE_TEXT_POKE_SMP |
| 66 | select HAVE_GENERIC_HARDIRQS | ||
| 67 | select HAVE_SPARSE_IRQ | ||
| 68 | select GENERIC_IRQ_PROBE | ||
| 69 | select GENERIC_PENDING_IRQ if SMP | ||
| 66 | 70 | ||
| 67 | config INSTRUCTION_DECODER | 71 | config INSTRUCTION_DECODER |
| 68 | def_bool (KPROBES || PERF_EVENTS) | 72 | def_bool (KPROBES || PERF_EVENTS) |
| @@ -204,20 +208,6 @@ config HAVE_INTEL_TXT | |||
| 204 | def_bool y | 208 | def_bool y |
| 205 | depends on EXPERIMENTAL && DMAR && ACPI | 209 | depends on EXPERIMENTAL && DMAR && ACPI |
| 206 | 210 | ||
| 207 | # Use the generic interrupt handling code in kernel/irq/: | ||
| 208 | config GENERIC_HARDIRQS | ||
| 209 | def_bool y | ||
| 210 | |||
| 211 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 212 | def_bool y | ||
| 213 | |||
| 214 | config GENERIC_IRQ_PROBE | ||
| 215 | def_bool y | ||
| 216 | |||
| 217 | config GENERIC_PENDING_IRQ | ||
| 218 | def_bool y | ||
| 219 | depends on GENERIC_HARDIRQS && SMP | ||
| 220 | |||
| 221 | config USE_GENERIC_SMP_HELPERS | 211 | config USE_GENERIC_SMP_HELPERS |
| 222 | def_bool y | 212 | def_bool y |
| 223 | depends on SMP | 213 | depends on SMP |
| @@ -300,23 +290,6 @@ config X86_X2APIC | |||
| 300 | 290 | ||
| 301 | If you don't know what to do here, say N. | 291 | If you don't know what to do here, say N. |
| 302 | 292 | ||
| 303 | config SPARSE_IRQ | ||
| 304 | bool "Support sparse irq numbering" | ||
| 305 | depends on PCI_MSI || HT_IRQ | ||
| 306 | ---help--- | ||
| 307 | This enables support for sparse irqs. This is useful for distro | ||
| 308 | kernels that want to define a high CONFIG_NR_CPUS value but still | ||
| 309 | want to have low kernel memory footprint on smaller machines. | ||
| 310 | |||
| 311 | ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread | ||
| 312 | out the irq_desc[] array in a more NUMA-friendly way. ) | ||
| 313 | |||
| 314 | If you don't know what to do here, say N. | ||
| 315 | |||
| 316 | config NUMA_IRQ_DESC | ||
| 317 | def_bool y | ||
| 318 | depends on SPARSE_IRQ && NUMA | ||
| 319 | |||
| 320 | config X86_MPPARSE | 293 | config X86_MPPARSE |
| 321 | bool "Enable MPS table" if ACPI | 294 | bool "Enable MPS table" if ACPI |
| 322 | default y | 295 | default y |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 1fa03e04ae4..286de34b0ed 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
| @@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void) | |||
| 252 | } | 252 | } |
| 253 | #endif | 253 | #endif |
| 254 | 254 | ||
| 255 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); | 255 | extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); |
| 256 | extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); | ||
| 257 | |||
| 258 | 256 | ||
| 259 | #else /* !CONFIG_X86_LOCAL_APIC */ | 257 | #else /* !CONFIG_X86_LOCAL_APIC */ |
| 260 | static inline void lapic_shutdown(void) { } | 258 | static inline void lapic_shutdown(void) { } |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 7fe3b3060f0..a859ca461fb 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
| @@ -131,6 +131,7 @@ | |||
| 131 | #define APIC_EILVTn(n) (0x500 + 0x10 * n) | 131 | #define APIC_EILVTn(n) (0x500 + 0x10 * n) |
| 132 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ | 132 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ |
| 133 | #define APIC_EILVT_NR_AMD_10H 4 | 133 | #define APIC_EILVT_NR_AMD_10H 4 |
| 134 | #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H | ||
| 134 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) | 135 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) |
| 135 | #define APIC_EILVT_MSG_FIX 0x0 | 136 | #define APIC_EILVT_MSG_FIX 0x0 |
| 136 | #define APIC_EILVT_MSG_SMI 0x2 | 137 | #define APIC_EILVT_MSG_SMI 0x2 |
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 1d5c08a1bdf..2c392d663dc 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
| @@ -74,10 +74,12 @@ extern void hpet_disable(void); | |||
| 74 | extern unsigned int hpet_readl(unsigned int a); | 74 | extern unsigned int hpet_readl(unsigned int a); |
| 75 | extern void force_hpet_resume(void); | 75 | extern void force_hpet_resume(void); |
| 76 | 76 | ||
| 77 | extern void hpet_msi_unmask(unsigned int irq); | 77 | struct irq_data; |
| 78 | extern void hpet_msi_mask(unsigned int irq); | 78 | extern void hpet_msi_unmask(struct irq_data *data); |
| 79 | extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); | 79 | extern void hpet_msi_mask(struct irq_data *data); |
| 80 | extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); | 80 | struct hpet_dev; |
| 81 | extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); | ||
| 82 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); | ||
| 81 | 83 | ||
| 82 | #ifdef CONFIG_PCI_MSI | 84 | #ifdef CONFIG_PCI_MSI |
| 83 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); | 85 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 3a54a1ca1a0..0274ec5a7e6 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
| @@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
| 78 | irq_attr->polarity = polarity; | 78 | irq_attr->polarity = polarity; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | struct irq_2_iommu { | ||
| 82 | struct intel_iommu *iommu; | ||
| 83 | u16 irte_index; | ||
| 84 | u16 sub_handle; | ||
| 85 | u8 irte_mask; | ||
| 86 | }; | ||
| 87 | |||
| 81 | /* | 88 | /* |
| 82 | * This is performance-critical, we want to do it O(1) | 89 | * This is performance-critical, we want to do it O(1) |
| 83 | * | 90 | * |
| @@ -89,15 +96,17 @@ struct irq_cfg { | |||
| 89 | cpumask_var_t old_domain; | 96 | cpumask_var_t old_domain; |
| 90 | u8 vector; | 97 | u8 vector; |
| 91 | u8 move_in_progress : 1; | 98 | u8 move_in_progress : 1; |
| 99 | #ifdef CONFIG_INTR_REMAP | ||
| 100 | struct irq_2_iommu irq_2_iommu; | ||
| 101 | #endif | ||
| 92 | }; | 102 | }; |
| 93 | 103 | ||
| 94 | extern struct irq_cfg *irq_cfg(unsigned int); | ||
| 95 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | 104 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); |
| 96 | extern void send_cleanup_vector(struct irq_cfg *); | 105 | extern void send_cleanup_vector(struct irq_cfg *); |
| 97 | 106 | ||
| 98 | struct irq_desc; | 107 | struct irq_data; |
| 99 | extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, | 108 | int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, |
| 100 | unsigned int *dest_id); | 109 | unsigned int *dest_id); |
| 101 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | 110 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); |
| 102 | extern void setup_ioapic_dest(void); | 111 | extern void setup_ioapic_dest(void); |
| 103 | 112 | ||
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 1655147646a..a20365953bf 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h | |||
| @@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip; | |||
| 55 | struct legacy_pic { | 55 | struct legacy_pic { |
| 56 | int nr_legacy_irqs; | 56 | int nr_legacy_irqs; |
| 57 | struct irq_chip *chip; | 57 | struct irq_chip *chip; |
| 58 | void (*mask)(unsigned int irq); | ||
| 59 | void (*unmask)(unsigned int irq); | ||
| 58 | void (*mask_all)(void); | 60 | void (*mask_all)(void); |
| 59 | void (*restore_mask)(void); | 61 | void (*restore_mask)(void); |
| 60 | void (*init)(int auto_eoi); | 62 | void (*init)(int auto_eoi); |
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 9cb2edb87c2..c8be4566c3d 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
| @@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); | |||
| 170 | 170 | ||
| 171 | extern void probe_nr_irqs_gsi(void); | 171 | extern void probe_nr_irqs_gsi(void); |
| 172 | 172 | ||
| 173 | extern int setup_ioapic_entry(int apic, int irq, | ||
| 174 | struct IO_APIC_route_entry *entry, | ||
| 175 | unsigned int destination, int trigger, | ||
| 176 | int polarity, int vector, int pin); | ||
| 177 | extern void ioapic_write_entry(int apic, int pin, | ||
| 178 | struct IO_APIC_route_entry e); | ||
| 179 | extern void setup_ioapic_ids_from_mpc(void); | 173 | extern void setup_ioapic_ids_from_mpc(void); |
| 180 | 174 | ||
| 181 | struct mp_ioapic_gsi{ | 175 | struct mp_ioapic_gsi{ |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 8d841505344..1c23360fb2d 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
| @@ -24,10 +24,18 @@ static inline void prepare_irte(struct irte *irte, int vector, | |||
| 24 | irte->dest_id = IRTE_DEST(dest); | 24 | irte->dest_id = IRTE_DEST(dest); |
| 25 | irte->redir_hint = 1; | 25 | irte->redir_hint = 1; |
| 26 | } | 26 | } |
| 27 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
| 28 | { | ||
| 29 | return cfg->irq_2_iommu.iommu != NULL; | ||
| 30 | } | ||
| 27 | #else | 31 | #else |
| 28 | static void prepare_irte(struct irte *irte, int vector, unsigned int dest) | 32 | static void prepare_irte(struct irte *irte, int vector, unsigned int dest) |
| 29 | { | 33 | { |
| 30 | } | 34 | } |
| 35 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
| 36 | { | ||
| 37 | return false; | ||
| 38 | } | ||
| 31 | #endif | 39 | #endif |
| 32 | 40 | ||
| 33 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ | 41 | #endif /* _ASM_X86_IRQ_REMAPPING_H */ |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 6fe2b5cb4f3..92543c73cf8 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
| @@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs) | |||
| 231 | apbt_start_counter(phy_cs_timer_id); | 231 | apbt_start_counter(phy_cs_timer_id); |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | /* Setup IRQ routing via IOAPIC */ | ||
| 235 | #ifdef CONFIG_SMP | ||
| 236 | static void apbt_setup_irq(struct apbt_dev *adev) | ||
| 237 | { | ||
| 238 | struct irq_chip *chip; | ||
| 239 | struct irq_desc *desc; | ||
| 240 | |||
| 241 | /* timer0 irq has been setup early */ | ||
| 242 | if (adev->irq == 0) | ||
| 243 | return; | ||
| 244 | desc = irq_to_desc(adev->irq); | ||
| 245 | chip = get_irq_chip(adev->irq); | ||
| 246 | disable_irq(adev->irq); | ||
| 247 | desc->status |= IRQ_MOVE_PCNTXT; | ||
| 248 | irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); | ||
| 249 | /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */ | ||
| 250 | set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); | ||
| 251 | enable_irq(adev->irq); | ||
| 252 | if (system_state == SYSTEM_BOOTING) | ||
| 253 | if (request_irq(adev->irq, apbt_interrupt_handler, | ||
| 254 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
| 255 | adev->name, adev)) { | ||
| 256 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
| 257 | adev->num); | ||
| 258 | } | ||
| 259 | } | ||
| 260 | #endif | ||
| 261 | |||
| 262 | static void apbt_enable_int(int n) | 234 | static void apbt_enable_int(int n) |
| 263 | { | 235 | { |
| 264 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | 236 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); |
| @@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void) | |||
| 334 | } | 306 | } |
| 335 | 307 | ||
| 336 | #ifdef CONFIG_SMP | 308 | #ifdef CONFIG_SMP |
| 309 | |||
| 310 | static void apbt_setup_irq(struct apbt_dev *adev) | ||
| 311 | { | ||
| 312 | /* timer0 irq has been setup early */ | ||
| 313 | if (adev->irq == 0) | ||
| 314 | return; | ||
| 315 | |||
| 316 | if (system_state == SYSTEM_BOOTING) { | ||
| 317 | irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); | ||
| 318 | /* APB timer irqs are set up as mp_irqs, timer is edge type */ | ||
| 319 | __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); | ||
| 320 | if (request_irq(adev->irq, apbt_interrupt_handler, | ||
| 321 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
| 322 | adev->name, adev)) { | ||
| 323 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
| 324 | adev->num); | ||
| 325 | } | ||
| 326 | } else | ||
| 327 | enable_irq(adev->irq); | ||
| 328 | } | ||
| 329 | |||
| 337 | /* Should be called with per cpu */ | 330 | /* Should be called with per cpu */ |
| 338 | void apbt_setup_secondary_clock(void) | 331 | void apbt_setup_secondary_clock(void) |
| 339 | { | 332 | { |
| @@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n, | |||
| 389 | 382 | ||
| 390 | switch (action & 0xf) { | 383 | switch (action & 0xf) { |
| 391 | case CPU_DEAD: | 384 | case CPU_DEAD: |
| 385 | disable_irq(adev->irq); | ||
| 392 | apbt_disable_int(cpu); | 386 | apbt_disable_int(cpu); |
| 393 | if (system_state == SYSTEM_RUNNING) | 387 | if (system_state == SYSTEM_RUNNING) { |
| 394 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | 388 | pr_debug("skipping APBT CPU %lu offline\n", cpu); |
| 395 | else if (adev) { | 389 | } else if (adev) { |
| 396 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | 390 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); |
| 397 | free_irq(adev->irq, adev); | 391 | free_irq(adev->irq, adev); |
| 398 | } | 392 | } |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 8cf86fb3b4e..850657d1b0e 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include <asm/mce.h> | 52 | #include <asm/mce.h> |
| 53 | #include <asm/kvm_para.h> | 53 | #include <asm/kvm_para.h> |
| 54 | #include <asm/tsc.h> | 54 | #include <asm/tsc.h> |
| 55 | #include <asm/atomic.h> | ||
| 55 | 56 | ||
| 56 | unsigned int num_processors; | 57 | unsigned int num_processors; |
| 57 | 58 | ||
| @@ -370,38 +371,87 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) | |||
| 370 | } | 371 | } |
| 371 | 372 | ||
| 372 | /* | 373 | /* |
| 373 | * Setup extended LVT, AMD specific (K8, family 10h) | 374 | * Setup extended LVT, AMD specific |
| 374 | * | 375 | * |
| 375 | * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and | 376 | * Software should use the LVT offsets the BIOS provides. The offsets |
| 376 | * MCE interrupts are supported. Thus MCE offset must be set to 0. | 377 | * are determined by the subsystems using it like those for MCE |
| 378 | * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts | ||
| 379 | * are supported. Beginning with family 10h at least 4 offsets are | ||
| 380 | * available. | ||
| 377 | * | 381 | * |
| 378 | * If mask=1, the LVT entry does not generate interrupts while mask=0 | 382 | * Since the offsets must be consistent for all cores, we keep track |
| 379 | * enables the vector. See also the BKDGs. | 383 | * of the LVT offsets in software and reserve the offset for the same |
| 384 | * vector also to be used on other cores. An offset is freed by | ||
| 385 | * setting the entry to APIC_EILVT_MASKED. | ||
| 386 | * | ||
| 387 | * If the BIOS is right, there should be no conflicts. Otherwise a | ||
| 388 | * "[Firmware Bug]: ..." error message is generated. However, if | ||
| 389 | * software does not properly determines the offsets, it is not | ||
| 390 | * necessarily a BIOS bug. | ||
| 380 | */ | 391 | */ |
| 381 | 392 | ||
| 382 | #define APIC_EILVT_LVTOFF_MCE 0 | 393 | static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; |
| 383 | #define APIC_EILVT_LVTOFF_IBS 1 | ||
| 384 | 394 | ||
| 385 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) | 395 | static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) |
| 386 | { | 396 | { |
| 387 | unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); | 397 | return (old & APIC_EILVT_MASKED) |
| 388 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; | 398 | || (new == APIC_EILVT_MASKED) |
| 389 | 399 | || ((new & ~APIC_EILVT_MASKED) == old); | |
| 390 | apic_write(reg, v); | ||
| 391 | } | 400 | } |
| 392 | 401 | ||
| 393 | u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) | 402 | static unsigned int reserve_eilvt_offset(int offset, unsigned int new) |
| 394 | { | 403 | { |
| 395 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); | 404 | unsigned int rsvd; /* 0: uninitialized */ |
| 396 | return APIC_EILVT_LVTOFF_MCE; | 405 | |
| 406 | if (offset >= APIC_EILVT_NR_MAX) | ||
| 407 | return ~0; | ||
| 408 | |||
| 409 | rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; | ||
| 410 | do { | ||
| 411 | if (rsvd && | ||
| 412 | !eilvt_entry_is_changeable(rsvd, new)) | ||
| 413 | /* may not change if vectors are different */ | ||
| 414 | return rsvd; | ||
| 415 | rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); | ||
| 416 | } while (rsvd != new); | ||
| 417 | |||
| 418 | return new; | ||
| 397 | } | 419 | } |
| 398 | 420 | ||
| 399 | u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) | 421 | /* |
| 422 | * If mask=1, the LVT entry does not generate interrupts while mask=0 | ||
| 423 | * enables the vector. See also the BKDGs. | ||
| 424 | */ | ||
| 425 | |||
| 426 | int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) | ||
| 400 | { | 427 | { |
| 401 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); | 428 | unsigned long reg = APIC_EILVTn(offset); |
| 402 | return APIC_EILVT_LVTOFF_IBS; | 429 | unsigned int new, old, reserved; |
| 430 | |||
| 431 | new = (mask << 16) | (msg_type << 8) | vector; | ||
| 432 | old = apic_read(reg); | ||
| 433 | reserved = reserve_eilvt_offset(offset, new); | ||
| 434 | |||
| 435 | if (reserved != new) { | ||
| 436 | pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but " | ||
| 437 | "vector 0x%x was already reserved by another core, " | ||
| 438 | "APIC%lX=0x%x\n", | ||
| 439 | smp_processor_id(), new, reserved, reg, old); | ||
| 440 | return -EINVAL; | ||
| 441 | } | ||
| 442 | |||
| 443 | if (!eilvt_entry_is_changeable(old, new)) { | ||
| 444 | pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but " | ||
| 445 | "register already in use, APIC%lX=0x%x\n", | ||
| 446 | smp_processor_id(), new, reg, old); | ||
| 447 | return -EBUSY; | ||
| 448 | } | ||
| 449 | |||
| 450 | apic_write(reg, new); | ||
| 451 | |||
| 452 | return 0; | ||
| 403 | } | 453 | } |
| 404 | EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); | 454 | EXPORT_SYMBOL_GPL(setup_APIC_eilvt); |
| 405 | 455 | ||
| 406 | /* | 456 | /* |
| 407 | * Program the next event, relative to now | 457 | * Program the next event, relative to now |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9508811e844..8ae808d110f 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -131,13 +131,9 @@ struct irq_pin_list { | |||
| 131 | struct irq_pin_list *next; | 131 | struct irq_pin_list *next; |
| 132 | }; | 132 | }; |
| 133 | 133 | ||
| 134 | static struct irq_pin_list *get_one_free_irq_2_pin(int node) | 134 | static struct irq_pin_list *alloc_irq_pin_list(int node) |
| 135 | { | 135 | { |
| 136 | struct irq_pin_list *pin; | 136 | return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); |
| 137 | |||
| 138 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); | ||
| 139 | |||
| 140 | return pin; | ||
| 141 | } | 137 | } |
| 142 | 138 | ||
| 143 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 139 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
| @@ -150,10 +146,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS]; | |||
| 150 | int __init arch_early_irq_init(void) | 146 | int __init arch_early_irq_init(void) |
| 151 | { | 147 | { |
| 152 | struct irq_cfg *cfg; | 148 | struct irq_cfg *cfg; |
| 153 | struct irq_desc *desc; | 149 | int count, node, i; |
| 154 | int count; | ||
| 155 | int node; | ||
| 156 | int i; | ||
| 157 | 150 | ||
| 158 | if (!legacy_pic->nr_legacy_irqs) { | 151 | if (!legacy_pic->nr_legacy_irqs) { |
| 159 | nr_irqs_gsi = 0; | 152 | nr_irqs_gsi = 0; |
| @@ -164,11 +157,13 @@ int __init arch_early_irq_init(void) | |||
| 164 | count = ARRAY_SIZE(irq_cfgx); | 157 | count = ARRAY_SIZE(irq_cfgx); |
| 165 | node = cpu_to_node(0); | 158 | node = cpu_to_node(0); |
| 166 | 159 | ||
| 160 | /* Make sure the legacy interrupts are marked in the bitmap */ | ||
| 161 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | ||
| 162 | |||
| 167 | for (i = 0; i < count; i++) { | 163 | for (i = 0; i < count; i++) { |
| 168 | desc = irq_to_desc(i); | 164 | set_irq_chip_data(i, &cfg[i]); |
| 169 | desc->chip_data = &cfg[i]; | 165 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
| 170 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); | 166 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
| 171 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); | ||
| 172 | /* | 167 | /* |
| 173 | * For legacy IRQ's, start with assigning irq0 to irq15 to | 168 | * For legacy IRQ's, start with assigning irq0 to irq15 to |
| 174 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. | 169 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. |
| @@ -183,170 +178,88 @@ int __init arch_early_irq_init(void) | |||
| 183 | } | 178 | } |
| 184 | 179 | ||
| 185 | #ifdef CONFIG_SPARSE_IRQ | 180 | #ifdef CONFIG_SPARSE_IRQ |
| 186 | struct irq_cfg *irq_cfg(unsigned int irq) | 181 | static struct irq_cfg *irq_cfg(unsigned int irq) |
| 187 | { | 182 | { |
| 188 | struct irq_cfg *cfg = NULL; | 183 | return get_irq_chip_data(irq); |
| 189 | struct irq_desc *desc; | ||
| 190 | |||
| 191 | desc = irq_to_desc(irq); | ||
| 192 | if (desc) | ||
| 193 | cfg = desc->chip_data; | ||
| 194 | |||
| 195 | return cfg; | ||
| 196 | } | 184 | } |
| 197 | 185 | ||
| 198 | static struct irq_cfg *get_one_free_irq_cfg(int node) | 186 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
| 199 | { | 187 | { |
| 200 | struct irq_cfg *cfg; | 188 | struct irq_cfg *cfg; |
| 201 | 189 | ||
| 202 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 190 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); |
| 203 | if (cfg) { | 191 | if (!cfg) |
| 204 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { | 192 | return NULL; |
| 205 | kfree(cfg); | 193 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) |
| 206 | cfg = NULL; | 194 | goto out_cfg; |
| 207 | } else if (!zalloc_cpumask_var_node(&cfg->old_domain, | 195 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) |
| 208 | GFP_ATOMIC, node)) { | 196 | goto out_domain; |
| 209 | free_cpumask_var(cfg->domain); | ||
| 210 | kfree(cfg); | ||
| 211 | cfg = NULL; | ||
| 212 | } | ||
| 213 | } | ||
| 214 | |||
| 215 | return cfg; | 197 | return cfg; |
| 198 | out_domain: | ||
| 199 | free_cpumask_var(cfg->domain); | ||
| 200 | out_cfg: | ||
| 201 | kfree(cfg); | ||
| 202 | return NULL; | ||
| 216 | } | 203 | } |
| 217 | 204 | ||
| 218 | int arch_init_chip_data(struct irq_desc *desc, int node) | 205 | static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) |
| 219 | { | ||
| 220 | struct irq_cfg *cfg; | ||
| 221 | |||
| 222 | cfg = desc->chip_data; | ||
| 223 | if (!cfg) { | ||
| 224 | desc->chip_data = get_one_free_irq_cfg(node); | ||
| 225 | if (!desc->chip_data) { | ||
| 226 | printk(KERN_ERR "can not alloc irq_cfg\n"); | ||
| 227 | BUG_ON(1); | ||
| 228 | } | ||
| 229 | } | ||
| 230 | |||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | |||
| 234 | /* for move_irq_desc */ | ||
| 235 | static void | ||
| 236 | init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) | ||
| 237 | { | 206 | { |
| 238 | struct irq_pin_list *old_entry, *head, *tail, *entry; | 207 | if (!cfg) |
| 239 | |||
| 240 | cfg->irq_2_pin = NULL; | ||
| 241 | old_entry = old_cfg->irq_2_pin; | ||
| 242 | if (!old_entry) | ||
| 243 | return; | ||
| 244 | |||
| 245 | entry = get_one_free_irq_2_pin(node); | ||
| 246 | if (!entry) | ||
| 247 | return; | 208 | return; |
| 209 | set_irq_chip_data(at, NULL); | ||
| 210 | free_cpumask_var(cfg->domain); | ||
| 211 | free_cpumask_var(cfg->old_domain); | ||
| 212 | kfree(cfg); | ||
| 213 | } | ||
| 248 | 214 | ||
| 249 | entry->apic = old_entry->apic; | 215 | #else |
| 250 | entry->pin = old_entry->pin; | ||
| 251 | head = entry; | ||
| 252 | tail = entry; | ||
| 253 | old_entry = old_entry->next; | ||
| 254 | while (old_entry) { | ||
| 255 | entry = get_one_free_irq_2_pin(node); | ||
| 256 | if (!entry) { | ||
| 257 | entry = head; | ||
| 258 | while (entry) { | ||
| 259 | head = entry->next; | ||
| 260 | kfree(entry); | ||
| 261 | entry = head; | ||
| 262 | } | ||
| 263 | /* still use the old one */ | ||
| 264 | return; | ||
| 265 | } | ||
| 266 | entry->apic = old_entry->apic; | ||
| 267 | entry->pin = old_entry->pin; | ||
| 268 | tail->next = entry; | ||
| 269 | tail = entry; | ||
| 270 | old_entry = old_entry->next; | ||
| 271 | } | ||
| 272 | 216 | ||
| 273 | tail->next = NULL; | 217 | struct irq_cfg *irq_cfg(unsigned int irq) |
| 274 | cfg->irq_2_pin = head; | 218 | { |
| 219 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | ||
| 275 | } | 220 | } |
| 276 | 221 | ||
| 277 | static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) | 222 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
| 278 | { | 223 | { |
| 279 | struct irq_pin_list *entry, *next; | 224 | return irq_cfgx + irq; |
| 280 | 225 | } | |
| 281 | if (old_cfg->irq_2_pin == cfg->irq_2_pin) | ||
| 282 | return; | ||
| 283 | 226 | ||
| 284 | entry = old_cfg->irq_2_pin; | 227 | static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } |
| 285 | 228 | ||
| 286 | while (entry) { | 229 | #endif |
| 287 | next = entry->next; | ||
| 288 | kfree(entry); | ||
| 289 | entry = next; | ||
| 290 | } | ||
| 291 | old_cfg->irq_2_pin = NULL; | ||
| 292 | } | ||
| 293 | 230 | ||
| 294 | void arch_init_copy_chip_data(struct irq_desc *old_desc, | 231 | static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) |
| 295 | struct irq_desc *desc, int node) | ||
| 296 | { | 232 | { |
| 233 | int res = irq_alloc_desc_at(at, node); | ||
| 297 | struct irq_cfg *cfg; | 234 | struct irq_cfg *cfg; |
| 298 | struct irq_cfg *old_cfg; | ||
| 299 | |||
| 300 | cfg = get_one_free_irq_cfg(node); | ||
| 301 | 235 | ||
| 302 | if (!cfg) | 236 | if (res < 0) { |
| 303 | return; | 237 | if (res != -EEXIST) |
| 304 | 238 | return NULL; | |
| 305 | desc->chip_data = cfg; | 239 | cfg = get_irq_chip_data(at); |
| 306 | 240 | if (cfg) | |
| 307 | old_cfg = old_desc->chip_data; | 241 | return cfg; |
| 308 | 242 | } | |
| 309 | cfg->vector = old_cfg->vector; | ||
| 310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
| 311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
| 312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
| 313 | |||
| 314 | init_copy_irq_2_pin(old_cfg, cfg, node); | ||
| 315 | } | ||
| 316 | 243 | ||
| 317 | static void free_irq_cfg(struct irq_cfg *cfg) | 244 | cfg = alloc_irq_cfg(at, node); |
| 318 | { | 245 | if (cfg) |
| 319 | free_cpumask_var(cfg->domain); | 246 | set_irq_chip_data(at, cfg); |
| 320 | free_cpumask_var(cfg->old_domain); | 247 | else |
| 321 | kfree(cfg); | 248 | irq_free_desc(at); |
| 249 | return cfg; | ||
| 322 | } | 250 | } |
| 323 | 251 | ||
| 324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 252 | static int alloc_irq_from(unsigned int from, int node) |
| 325 | { | 253 | { |
| 326 | struct irq_cfg *old_cfg, *cfg; | 254 | return irq_alloc_desc_from(from, node); |
| 327 | |||
| 328 | old_cfg = old_desc->chip_data; | ||
| 329 | cfg = desc->chip_data; | ||
| 330 | |||
| 331 | if (old_cfg == cfg) | ||
| 332 | return; | ||
| 333 | |||
| 334 | if (old_cfg) { | ||
| 335 | free_irq_2_pin(old_cfg, cfg); | ||
| 336 | free_irq_cfg(old_cfg); | ||
| 337 | old_desc->chip_data = NULL; | ||
| 338 | } | ||
| 339 | } | 255 | } |
| 340 | /* end for move_irq_desc */ | ||
| 341 | 256 | ||
| 342 | #else | 257 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) |
| 343 | struct irq_cfg *irq_cfg(unsigned int irq) | ||
| 344 | { | 258 | { |
| 345 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | 259 | free_irq_cfg(at, cfg); |
| 260 | irq_free_desc(at); | ||
| 346 | } | 261 | } |
| 347 | 262 | ||
| 348 | #endif | ||
| 349 | |||
| 350 | struct io_apic { | 263 | struct io_apic { |
| 351 | unsigned int index; | 264 | unsigned int index; |
| 352 | unsigned int unused[3]; | 265 | unsigned int unused[3]; |
| @@ -451,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | |||
| 451 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | 364 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); |
| 452 | } | 365 | } |
| 453 | 366 | ||
| 454 | void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | 367 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) |
| 455 | { | 368 | { |
| 456 | unsigned long flags; | 369 | unsigned long flags; |
| 457 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 370 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
| @@ -481,7 +394,7 @@ static void ioapic_mask_entry(int apic, int pin) | |||
| 481 | * fast in the common case, and fast for shared ISA-space IRQs. | 394 | * fast in the common case, and fast for shared ISA-space IRQs. |
| 482 | */ | 395 | */ |
| 483 | static int | 396 | static int |
| 484 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | 397 | __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
| 485 | { | 398 | { |
| 486 | struct irq_pin_list **last, *entry; | 399 | struct irq_pin_list **last, *entry; |
| 487 | 400 | ||
| @@ -493,7 +406,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
| 493 | last = &entry->next; | 406 | last = &entry->next; |
| 494 | } | 407 | } |
| 495 | 408 | ||
| 496 | entry = get_one_free_irq_2_pin(node); | 409 | entry = alloc_irq_pin_list(node); |
| 497 | if (!entry) { | 410 | if (!entry) { |
| 498 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | 411 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", |
| 499 | node, apic, pin); | 412 | node, apic, pin); |
| @@ -508,7 +421,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
| 508 | 421 | ||
| 509 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 422 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
| 510 | { | 423 | { |
| 511 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | 424 | if (__add_pin_to_irq_node(cfg, node, apic, pin)) |
| 512 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | 425 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); |
| 513 | } | 426 | } |
| 514 | 427 | ||
| @@ -571,11 +484,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) | |||
| 571 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 484 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
| 572 | } | 485 | } |
| 573 | 486 | ||
| 574 | static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | ||
| 575 | { | ||
| 576 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | ||
| 577 | } | ||
| 578 | |||
| 579 | static void io_apic_sync(struct irq_pin_list *entry) | 487 | static void io_apic_sync(struct irq_pin_list *entry) |
| 580 | { | 488 | { |
| 581 | /* | 489 | /* |
| @@ -587,44 +495,37 @@ static void io_apic_sync(struct irq_pin_list *entry) | |||
| 587 | readl(&io_apic->data); | 495 | readl(&io_apic->data); |
| 588 | } | 496 | } |
| 589 | 497 | ||
| 590 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | 498 | static void mask_ioapic(struct irq_cfg *cfg) |
| 591 | { | 499 | { |
| 500 | unsigned long flags; | ||
| 501 | |||
| 502 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
| 592 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 503 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
| 504 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
| 593 | } | 505 | } |
| 594 | 506 | ||
| 595 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 507 | static void mask_ioapic_irq(struct irq_data *data) |
| 596 | { | 508 | { |
| 597 | struct irq_cfg *cfg = desc->chip_data; | 509 | mask_ioapic(data->chip_data); |
| 598 | unsigned long flags; | 510 | } |
| 599 | |||
| 600 | BUG_ON(!cfg); | ||
| 601 | 511 | ||
| 602 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 512 | static void __unmask_ioapic(struct irq_cfg *cfg) |
| 603 | __mask_IO_APIC_irq(cfg); | 513 | { |
| 604 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 514 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
| 605 | } | 515 | } |
| 606 | 516 | ||
| 607 | static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) | 517 | static void unmask_ioapic(struct irq_cfg *cfg) |
| 608 | { | 518 | { |
| 609 | struct irq_cfg *cfg = desc->chip_data; | ||
| 610 | unsigned long flags; | 519 | unsigned long flags; |
| 611 | 520 | ||
| 612 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 521 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
| 613 | __unmask_IO_APIC_irq(cfg); | 522 | __unmask_ioapic(cfg); |
| 614 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 523 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
| 615 | } | 524 | } |
| 616 | 525 | ||
| 617 | static void mask_IO_APIC_irq(unsigned int irq) | 526 | static void unmask_ioapic_irq(struct irq_data *data) |
| 618 | { | 527 | { |
| 619 | struct irq_desc *desc = irq_to_desc(irq); | 528 | unmask_ioapic(data->chip_data); |
| 620 | |||
| 621 | mask_IO_APIC_irq_desc(desc); | ||
| 622 | } | ||
| 623 | static void unmask_IO_APIC_irq(unsigned int irq) | ||
| 624 | { | ||
| 625 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 626 | |||
| 627 | unmask_IO_APIC_irq_desc(desc); | ||
| 628 | } | 529 | } |
| 629 | 530 | ||
| 630 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 531 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
| @@ -694,14 +595,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) | |||
| 694 | struct IO_APIC_route_entry **ioapic_entries; | 595 | struct IO_APIC_route_entry **ioapic_entries; |
| 695 | 596 | ||
| 696 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, | 597 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, |
| 697 | GFP_ATOMIC); | 598 | GFP_KERNEL); |
| 698 | if (!ioapic_entries) | 599 | if (!ioapic_entries) |
| 699 | return 0; | 600 | return 0; |
| 700 | 601 | ||
| 701 | for (apic = 0; apic < nr_ioapics; apic++) { | 602 | for (apic = 0; apic < nr_ioapics; apic++) { |
| 702 | ioapic_entries[apic] = | 603 | ioapic_entries[apic] = |
| 703 | kzalloc(sizeof(struct IO_APIC_route_entry) * | 604 | kzalloc(sizeof(struct IO_APIC_route_entry) * |
| 704 | nr_ioapic_registers[apic], GFP_ATOMIC); | 605 | nr_ioapic_registers[apic], GFP_KERNEL); |
| 705 | if (!ioapic_entries[apic]) | 606 | if (!ioapic_entries[apic]) |
| 706 | goto nomem; | 607 | goto nomem; |
| 707 | } | 608 | } |
| @@ -1259,7 +1160,6 @@ void __setup_vector_irq(int cpu) | |||
| 1259 | /* Initialize vector_irq on a new cpu */ | 1160 | /* Initialize vector_irq on a new cpu */ |
| 1260 | int irq, vector; | 1161 | int irq, vector; |
| 1261 | struct irq_cfg *cfg; | 1162 | struct irq_cfg *cfg; |
| 1262 | struct irq_desc *desc; | ||
| 1263 | 1163 | ||
| 1264 | /* | 1164 | /* |
| 1265 | * vector_lock will make sure that we don't run into irq vector | 1165 | * vector_lock will make sure that we don't run into irq vector |
| @@ -1268,9 +1168,10 @@ void __setup_vector_irq(int cpu) | |||
| 1268 | */ | 1168 | */ |
| 1269 | raw_spin_lock(&vector_lock); | 1169 | raw_spin_lock(&vector_lock); |
| 1270 | /* Mark the inuse vectors */ | 1170 | /* Mark the inuse vectors */ |
| 1271 | for_each_irq_desc(irq, desc) { | 1171 | for_each_active_irq(irq) { |
| 1272 | cfg = desc->chip_data; | 1172 | cfg = get_irq_chip_data(irq); |
| 1273 | 1173 | if (!cfg) | |
| 1174 | continue; | ||
| 1274 | /* | 1175 | /* |
| 1275 | * If it is a legacy IRQ handled by the legacy PIC, this cpu | 1176 | * If it is a legacy IRQ handled by the legacy PIC, this cpu |
| 1276 | * will be part of the irq_cfg's domain. | 1177 | * will be part of the irq_cfg's domain. |
| @@ -1327,17 +1228,17 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
| 1327 | } | 1228 | } |
| 1328 | #endif | 1229 | #endif |
| 1329 | 1230 | ||
| 1330 | static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) | 1231 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) |
| 1331 | { | 1232 | { |
| 1332 | 1233 | ||
| 1333 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1234 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
| 1334 | trigger == IOAPIC_LEVEL) | 1235 | trigger == IOAPIC_LEVEL) |
| 1335 | desc->status |= IRQ_LEVEL; | 1236 | irq_set_status_flags(irq, IRQ_LEVEL); |
| 1336 | else | 1237 | else |
| 1337 | desc->status &= ~IRQ_LEVEL; | 1238 | irq_clear_status_flags(irq, IRQ_LEVEL); |
| 1338 | 1239 | ||
| 1339 | if (irq_remapped(irq)) { | 1240 | if (irq_remapped(get_irq_chip_data(irq))) { |
| 1340 | desc->status |= IRQ_MOVE_PCNTXT; | 1241 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 1341 | if (trigger) | 1242 | if (trigger) |
| 1342 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1243 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, |
| 1343 | handle_fasteoi_irq, | 1244 | handle_fasteoi_irq, |
| @@ -1358,10 +1259,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t | |||
| 1358 | handle_edge_irq, "edge"); | 1259 | handle_edge_irq, "edge"); |
| 1359 | } | 1260 | } |
| 1360 | 1261 | ||
| 1361 | int setup_ioapic_entry(int apic_id, int irq, | 1262 | static int setup_ioapic_entry(int apic_id, int irq, |
| 1362 | struct IO_APIC_route_entry *entry, | 1263 | struct IO_APIC_route_entry *entry, |
| 1363 | unsigned int destination, int trigger, | 1264 | unsigned int destination, int trigger, |
| 1364 | int polarity, int vector, int pin) | 1265 | int polarity, int vector, int pin) |
| 1365 | { | 1266 | { |
| 1366 | /* | 1267 | /* |
| 1367 | * add it to the IO-APIC irq-routing table: | 1268 | * add it to the IO-APIC irq-routing table: |
| @@ -1417,18 +1318,14 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
| 1417 | return 0; | 1318 | return 0; |
| 1418 | } | 1319 | } |
| 1419 | 1320 | ||
| 1420 | static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, | 1321 | static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, |
| 1421 | int trigger, int polarity) | 1322 | struct irq_cfg *cfg, int trigger, int polarity) |
| 1422 | { | 1323 | { |
| 1423 | struct irq_cfg *cfg; | ||
| 1424 | struct IO_APIC_route_entry entry; | 1324 | struct IO_APIC_route_entry entry; |
| 1425 | unsigned int dest; | 1325 | unsigned int dest; |
| 1426 | 1326 | ||
| 1427 | if (!IO_APIC_IRQ(irq)) | 1327 | if (!IO_APIC_IRQ(irq)) |
| 1428 | return; | 1328 | return; |
| 1429 | |||
| 1430 | cfg = desc->chip_data; | ||
| 1431 | |||
| 1432 | /* | 1329 | /* |
| 1433 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy | 1330 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy |
| 1434 | * controllers like 8259. Now that IO-APIC can handle this irq, update | 1331 | * controllers like 8259. Now that IO-APIC can handle this irq, update |
| @@ -1457,9 +1354,9 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
| 1457 | return; | 1354 | return; |
| 1458 | } | 1355 | } |
| 1459 | 1356 | ||
| 1460 | ioapic_register_intr(irq, desc, trigger); | 1357 | ioapic_register_intr(irq, trigger); |
| 1461 | if (irq < legacy_pic->nr_legacy_irqs) | 1358 | if (irq < legacy_pic->nr_legacy_irqs) |
| 1462 | legacy_pic->chip->mask(irq); | 1359 | legacy_pic->mask(irq); |
| 1463 | 1360 | ||
| 1464 | ioapic_write_entry(apic_id, pin, entry); | 1361 | ioapic_write_entry(apic_id, pin, entry); |
| 1465 | } | 1362 | } |
| @@ -1470,11 +1367,9 @@ static struct { | |||
| 1470 | 1367 | ||
| 1471 | static void __init setup_IO_APIC_irqs(void) | 1368 | static void __init setup_IO_APIC_irqs(void) |
| 1472 | { | 1369 | { |
| 1473 | int apic_id, pin, idx, irq; | 1370 | int apic_id, pin, idx, irq, notcon = 0; |
| 1474 | int notcon = 0; | ||
| 1475 | struct irq_desc *desc; | ||
| 1476 | struct irq_cfg *cfg; | ||
| 1477 | int node = cpu_to_node(0); | 1371 | int node = cpu_to_node(0); |
| 1372 | struct irq_cfg *cfg; | ||
| 1478 | 1373 | ||
| 1479 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1374 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
| 1480 | 1375 | ||
| @@ -1511,19 +1406,17 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1511 | apic->multi_timer_check(apic_id, irq)) | 1406 | apic->multi_timer_check(apic_id, irq)) |
| 1512 | continue; | 1407 | continue; |
| 1513 | 1408 | ||
| 1514 | desc = irq_to_desc_alloc_node(irq, node); | 1409 | cfg = alloc_irq_and_cfg_at(irq, node); |
| 1515 | if (!desc) { | 1410 | if (!cfg) |
| 1516 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
| 1517 | continue; | 1411 | continue; |
| 1518 | } | 1412 | |
| 1519 | cfg = desc->chip_data; | ||
| 1520 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
| 1521 | /* | 1414 | /* |
| 1522 | * don't mark it in pin_programmed, so later acpi could | 1415 | * don't mark it in pin_programmed, so later acpi could |
| 1523 | * set it correctly when irq < 16 | 1416 | * set it correctly when irq < 16 |
| 1524 | */ | 1417 | */ |
| 1525 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1418 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), |
| 1526 | irq_trigger(idx), irq_polarity(idx)); | 1419 | irq_polarity(idx)); |
| 1527 | } | 1420 | } |
| 1528 | 1421 | ||
| 1529 | if (notcon) | 1422 | if (notcon) |
| @@ -1538,9 +1431,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1538 | */ | 1431 | */ |
| 1539 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
| 1540 | { | 1433 | { |
| 1541 | int apic_id = 0, pin, idx, irq; | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
| 1542 | int node = cpu_to_node(0); | ||
| 1543 | struct irq_desc *desc; | ||
| 1544 | struct irq_cfg *cfg; | 1435 | struct irq_cfg *cfg; |
| 1545 | 1436 | ||
| 1546 | /* | 1437 | /* |
| @@ -1556,18 +1447,15 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
| 1556 | return; | 1447 | return; |
| 1557 | 1448 | ||
| 1558 | irq = pin_2_irq(idx, apic_id, pin); | 1449 | irq = pin_2_irq(idx, apic_id, pin); |
| 1559 | #ifdef CONFIG_SPARSE_IRQ | 1450 | |
| 1560 | desc = irq_to_desc(irq); | 1451 | /* Only handle the non legacy irqs on secondary ioapics */ |
| 1561 | if (desc) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
| 1562 | return; | 1453 | return; |
| 1563 | #endif | 1454 | |
| 1564 | desc = irq_to_desc_alloc_node(irq, node); | 1455 | cfg = alloc_irq_and_cfg_at(irq, node); |
| 1565 | if (!desc) { | 1456 | if (!cfg) |
| 1566 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
| 1567 | return; | 1457 | return; |
| 1568 | } | ||
| 1569 | 1458 | ||
| 1570 | cfg = desc->chip_data; | ||
| 1571 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1459 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
| 1572 | 1460 | ||
| 1573 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | 1461 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { |
| @@ -1577,7 +1465,7 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
| 1577 | } | 1465 | } |
| 1578 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | 1466 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); |
| 1579 | 1467 | ||
| 1580 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1468 | setup_ioapic_irq(apic_id, pin, irq, cfg, |
| 1581 | irq_trigger(idx), irq_polarity(idx)); | 1469 | irq_trigger(idx), irq_polarity(idx)); |
| 1582 | } | 1470 | } |
| 1583 | 1471 | ||
| @@ -1628,7 +1516,6 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
| 1628 | union IO_APIC_reg_03 reg_03; | 1516 | union IO_APIC_reg_03 reg_03; |
| 1629 | unsigned long flags; | 1517 | unsigned long flags; |
| 1630 | struct irq_cfg *cfg; | 1518 | struct irq_cfg *cfg; |
| 1631 | struct irq_desc *desc; | ||
| 1632 | unsigned int irq; | 1519 | unsigned int irq; |
| 1633 | 1520 | ||
| 1634 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | 1521 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); |
| @@ -1715,10 +1602,10 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
| 1715 | } | 1602 | } |
| 1716 | } | 1603 | } |
| 1717 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | 1604 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); |
| 1718 | for_each_irq_desc(irq, desc) { | 1605 | for_each_active_irq(irq) { |
| 1719 | struct irq_pin_list *entry; | 1606 | struct irq_pin_list *entry; |
| 1720 | 1607 | ||
| 1721 | cfg = desc->chip_data; | 1608 | cfg = get_irq_chip_data(irq); |
| 1722 | if (!cfg) | 1609 | if (!cfg) |
| 1723 | continue; | 1610 | continue; |
| 1724 | entry = cfg->irq_2_pin; | 1611 | entry = cfg->irq_2_pin; |
| @@ -2225,29 +2112,26 @@ static int __init timer_irq_works(void) | |||
| 2225 | * an edge even if it isn't on the 8259A... | 2112 | * an edge even if it isn't on the 8259A... |
| 2226 | */ | 2113 | */ |
| 2227 | 2114 | ||
| 2228 | static unsigned int startup_ioapic_irq(unsigned int irq) | 2115 | static unsigned int startup_ioapic_irq(struct irq_data *data) |
| 2229 | { | 2116 | { |
| 2230 | int was_pending = 0; | 2117 | int was_pending = 0, irq = data->irq; |
| 2231 | unsigned long flags; | 2118 | unsigned long flags; |
| 2232 | struct irq_cfg *cfg; | ||
| 2233 | 2119 | ||
| 2234 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2120 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
| 2235 | if (irq < legacy_pic->nr_legacy_irqs) { | 2121 | if (irq < legacy_pic->nr_legacy_irqs) { |
| 2236 | legacy_pic->chip->mask(irq); | 2122 | legacy_pic->mask(irq); |
| 2237 | if (legacy_pic->irq_pending(irq)) | 2123 | if (legacy_pic->irq_pending(irq)) |
| 2238 | was_pending = 1; | 2124 | was_pending = 1; |
| 2239 | } | 2125 | } |
| 2240 | cfg = irq_cfg(irq); | 2126 | __unmask_ioapic(data->chip_data); |
| 2241 | __unmask_IO_APIC_irq(cfg); | ||
| 2242 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2127 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
| 2243 | 2128 | ||
| 2244 | return was_pending; | 2129 | return was_pending; |
| 2245 | } | 2130 | } |
| 2246 | 2131 | ||
| 2247 | static int ioapic_retrigger_irq(unsigned int irq) | 2132 | static int ioapic_retrigger_irq(struct irq_data *data) |
| 2248 | { | 2133 | { |
| 2249 | 2134 | struct irq_cfg *cfg = data->chip_data; | |
| 2250 | struct irq_cfg *cfg = irq_cfg(irq); | ||
| 2251 | unsigned long flags; | 2135 | unsigned long flags; |
| 2252 | 2136 | ||
| 2253 | raw_spin_lock_irqsave(&vector_lock, flags); | 2137 | raw_spin_lock_irqsave(&vector_lock, flags); |
| @@ -2298,7 +2182,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
| 2298 | * With interrupt-remapping, destination information comes | 2182 | * With interrupt-remapping, destination information comes |
| 2299 | * from interrupt-remapping table entry. | 2183 | * from interrupt-remapping table entry. |
| 2300 | */ | 2184 | */ |
| 2301 | if (!irq_remapped(irq)) | 2185 | if (!irq_remapped(cfg)) |
| 2302 | io_apic_write(apic, 0x11 + pin*2, dest); | 2186 | io_apic_write(apic, 0x11 + pin*2, dest); |
| 2303 | reg = io_apic_read(apic, 0x10 + pin*2); | 2187 | reg = io_apic_read(apic, 0x10 + pin*2); |
| 2304 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2188 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
| @@ -2308,65 +2192,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
| 2308 | } | 2192 | } |
| 2309 | 2193 | ||
| 2310 | /* | 2194 | /* |
| 2311 | * Either sets desc->affinity to a valid value, and returns | 2195 | * Either sets data->affinity to a valid value, and returns |
| 2312 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and | 2196 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and |
| 2313 | * leaves desc->affinity untouched. | 2197 | * leaves data->affinity untouched. |
| 2314 | */ | 2198 | */ |
| 2315 | unsigned int | 2199 | int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 2316 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, | 2200 | unsigned int *dest_id) |
| 2317 | unsigned int *dest_id) | ||
| 2318 | { | 2201 | { |
| 2319 | struct irq_cfg *cfg; | 2202 | struct irq_cfg *cfg = data->chip_data; |
| 2320 | unsigned int irq; | ||
| 2321 | 2203 | ||
| 2322 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2204 | if (!cpumask_intersects(mask, cpu_online_mask)) |
| 2323 | return -1; | 2205 | return -1; |
| 2324 | 2206 | ||
| 2325 | irq = desc->irq; | 2207 | if (assign_irq_vector(data->irq, data->chip_data, mask)) |
| 2326 | cfg = desc->chip_data; | ||
| 2327 | if (assign_irq_vector(irq, cfg, mask)) | ||
| 2328 | return -1; | 2208 | return -1; |
| 2329 | 2209 | ||
| 2330 | cpumask_copy(desc->affinity, mask); | 2210 | cpumask_copy(data->affinity, mask); |
| 2331 | 2211 | ||
| 2332 | *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); | 2212 | *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); |
| 2333 | return 0; | 2213 | return 0; |
| 2334 | } | 2214 | } |
| 2335 | 2215 | ||
| 2336 | static int | 2216 | static int |
| 2337 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2217 | ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 2218 | bool force) | ||
| 2338 | { | 2219 | { |
| 2339 | struct irq_cfg *cfg; | 2220 | unsigned int dest, irq = data->irq; |
| 2340 | unsigned long flags; | 2221 | unsigned long flags; |
| 2341 | unsigned int dest; | 2222 | int ret; |
| 2342 | unsigned int irq; | ||
| 2343 | int ret = -1; | ||
| 2344 | |||
| 2345 | irq = desc->irq; | ||
| 2346 | cfg = desc->chip_data; | ||
| 2347 | 2223 | ||
| 2348 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2224 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
| 2349 | ret = set_desc_affinity(desc, mask, &dest); | 2225 | ret = __ioapic_set_affinity(data, mask, &dest); |
| 2350 | if (!ret) { | 2226 | if (!ret) { |
| 2351 | /* Only the high 8 bits are valid. */ | 2227 | /* Only the high 8 bits are valid. */ |
| 2352 | dest = SET_APIC_LOGICAL_ID(dest); | 2228 | dest = SET_APIC_LOGICAL_ID(dest); |
| 2353 | __target_IO_APIC_irq(irq, dest, cfg); | 2229 | __target_IO_APIC_irq(irq, dest, data->chip_data); |
| 2354 | } | 2230 | } |
| 2355 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2231 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
| 2356 | |||
| 2357 | return ret; | 2232 | return ret; |
| 2358 | } | 2233 | } |
| 2359 | 2234 | ||
| 2360 | static int | ||
| 2361 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
| 2362 | { | ||
| 2363 | struct irq_desc *desc; | ||
| 2364 | |||
| 2365 | desc = irq_to_desc(irq); | ||
| 2366 | |||
| 2367 | return set_ioapic_affinity_irq_desc(desc, mask); | ||
| 2368 | } | ||
| 2369 | |||
| 2370 | #ifdef CONFIG_INTR_REMAP | 2235 | #ifdef CONFIG_INTR_REMAP |
| 2371 | 2236 | ||
| 2372 | /* | 2237 | /* |
| @@ -2381,24 +2246,21 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | |||
| 2381 | * the interrupt-remapping table entry. | 2246 | * the interrupt-remapping table entry. |
| 2382 | */ | 2247 | */ |
| 2383 | static int | 2248 | static int |
| 2384 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2249 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 2250 | bool force) | ||
| 2385 | { | 2251 | { |
| 2386 | struct irq_cfg *cfg; | 2252 | struct irq_cfg *cfg = data->chip_data; |
| 2253 | unsigned int dest, irq = data->irq; | ||
| 2387 | struct irte irte; | 2254 | struct irte irte; |
| 2388 | unsigned int dest; | ||
| 2389 | unsigned int irq; | ||
| 2390 | int ret = -1; | ||
| 2391 | 2255 | ||
| 2392 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2256 | if (!cpumask_intersects(mask, cpu_online_mask)) |
| 2393 | return ret; | 2257 | return -EINVAL; |
| 2394 | 2258 | ||
| 2395 | irq = desc->irq; | ||
| 2396 | if (get_irte(irq, &irte)) | 2259 | if (get_irte(irq, &irte)) |
| 2397 | return ret; | 2260 | return -EBUSY; |
| 2398 | 2261 | ||
| 2399 | cfg = desc->chip_data; | ||
| 2400 | if (assign_irq_vector(irq, cfg, mask)) | 2262 | if (assign_irq_vector(irq, cfg, mask)) |
| 2401 | return ret; | 2263 | return -EBUSY; |
| 2402 | 2264 | ||
| 2403 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); | 2265 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); |
| 2404 | 2266 | ||
| @@ -2413,29 +2275,14 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | |||
| 2413 | if (cfg->move_in_progress) | 2275 | if (cfg->move_in_progress) |
| 2414 | send_cleanup_vector(cfg); | 2276 | send_cleanup_vector(cfg); |
| 2415 | 2277 | ||
| 2416 | cpumask_copy(desc->affinity, mask); | 2278 | cpumask_copy(data->affinity, mask); |
| 2417 | |||
| 2418 | return 0; | 2279 | return 0; |
| 2419 | } | 2280 | } |
| 2420 | 2281 | ||
| 2421 | /* | ||
| 2422 | * Migrates the IRQ destination in the process context. | ||
| 2423 | */ | ||
| 2424 | static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | ||
| 2425 | const struct cpumask *mask) | ||
| 2426 | { | ||
| 2427 | return migrate_ioapic_irq_desc(desc, mask); | ||
| 2428 | } | ||
| 2429 | static int set_ir_ioapic_affinity_irq(unsigned int irq, | ||
| 2430 | const struct cpumask *mask) | ||
| 2431 | { | ||
| 2432 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 2433 | |||
| 2434 | return set_ir_ioapic_affinity_irq_desc(desc, mask); | ||
| 2435 | } | ||
| 2436 | #else | 2282 | #else |
| 2437 | static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | 2283 | static inline int |
| 2438 | const struct cpumask *mask) | 2284 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 2285 | bool force) | ||
| 2439 | { | 2286 | { |
| 2440 | return 0; | 2287 | return 0; |
| 2441 | } | 2288 | } |
| @@ -2497,10 +2344,8 @@ unlock: | |||
| 2497 | irq_exit(); | 2344 | irq_exit(); |
| 2498 | } | 2345 | } |
| 2499 | 2346 | ||
| 2500 | static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | 2347 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) |
| 2501 | { | 2348 | { |
| 2502 | struct irq_desc *desc = *descp; | ||
| 2503 | struct irq_cfg *cfg = desc->chip_data; | ||
| 2504 | unsigned me; | 2349 | unsigned me; |
| 2505 | 2350 | ||
| 2506 | if (likely(!cfg->move_in_progress)) | 2351 | if (likely(!cfg->move_in_progress)) |
| @@ -2512,31 +2357,28 @@ static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | |||
| 2512 | send_cleanup_vector(cfg); | 2357 | send_cleanup_vector(cfg); |
| 2513 | } | 2358 | } |
| 2514 | 2359 | ||
| 2515 | static void irq_complete_move(struct irq_desc **descp) | 2360 | static void irq_complete_move(struct irq_cfg *cfg) |
| 2516 | { | 2361 | { |
| 2517 | __irq_complete_move(descp, ~get_irq_regs()->orig_ax); | 2362 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); |
| 2518 | } | 2363 | } |
| 2519 | 2364 | ||
| 2520 | void irq_force_complete_move(int irq) | 2365 | void irq_force_complete_move(int irq) |
| 2521 | { | 2366 | { |
| 2522 | struct irq_desc *desc = irq_to_desc(irq); | 2367 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
| 2523 | struct irq_cfg *cfg = desc->chip_data; | ||
| 2524 | 2368 | ||
| 2525 | if (!cfg) | 2369 | if (!cfg) |
| 2526 | return; | 2370 | return; |
| 2527 | 2371 | ||
| 2528 | __irq_complete_move(&desc, cfg->vector); | 2372 | __irq_complete_move(cfg, cfg->vector); |
| 2529 | } | 2373 | } |
| 2530 | #else | 2374 | #else |
| 2531 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2375 | static inline void irq_complete_move(struct irq_cfg *cfg) { } |
| 2532 | #endif | 2376 | #endif |
| 2533 | 2377 | ||
| 2534 | static void ack_apic_edge(unsigned int irq) | 2378 | static void ack_apic_edge(struct irq_data *data) |
| 2535 | { | 2379 | { |
| 2536 | struct irq_desc *desc = irq_to_desc(irq); | 2380 | irq_complete_move(data->chip_data); |
| 2537 | 2381 | move_native_irq(data->irq); | |
| 2538 | irq_complete_move(&desc); | ||
| 2539 | move_native_irq(irq); | ||
| 2540 | ack_APIC_irq(); | 2382 | ack_APIC_irq(); |
| 2541 | } | 2383 | } |
| 2542 | 2384 | ||
| @@ -2558,10 +2400,12 @@ atomic_t irq_mis_count; | |||
| 2558 | * Otherwise, we simulate the EOI message manually by changing the trigger | 2400 | * Otherwise, we simulate the EOI message manually by changing the trigger |
| 2559 | * mode to edge and then back to level, with RTE being masked during this. | 2401 | * mode to edge and then back to level, with RTE being masked during this. |
| 2560 | */ | 2402 | */ |
| 2561 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2403 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
| 2562 | { | 2404 | { |
| 2563 | struct irq_pin_list *entry; | 2405 | struct irq_pin_list *entry; |
| 2406 | unsigned long flags; | ||
| 2564 | 2407 | ||
| 2408 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
| 2565 | for_each_irq_pin(entry, cfg->irq_2_pin) { | 2409 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
| 2566 | if (mp_ioapics[entry->apic].apicver >= 0x20) { | 2410 | if (mp_ioapics[entry->apic].apicver >= 0x20) { |
| 2567 | /* | 2411 | /* |
| @@ -2570,7 +2414,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
| 2570 | * intr-remapping table entry. Hence for the io-apic | 2414 | * intr-remapping table entry. Hence for the io-apic |
| 2571 | * EOI we use the pin number. | 2415 | * EOI we use the pin number. |
| 2572 | */ | 2416 | */ |
| 2573 | if (irq_remapped(irq)) | 2417 | if (irq_remapped(cfg)) |
| 2574 | io_apic_eoi(entry->apic, entry->pin); | 2418 | io_apic_eoi(entry->apic, entry->pin); |
| 2575 | else | 2419 | else |
| 2576 | io_apic_eoi(entry->apic, cfg->vector); | 2420 | io_apic_eoi(entry->apic, cfg->vector); |
| @@ -2579,36 +2423,22 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
| 2579 | __unmask_and_level_IO_APIC_irq(entry); | 2423 | __unmask_and_level_IO_APIC_irq(entry); |
| 2580 | } | 2424 | } |
| 2581 | } | 2425 | } |
| 2582 | } | ||
| 2583 | |||
| 2584 | static void eoi_ioapic_irq(struct irq_desc *desc) | ||
| 2585 | { | ||
| 2586 | struct irq_cfg *cfg; | ||
| 2587 | unsigned long flags; | ||
| 2588 | unsigned int irq; | ||
| 2589 | |||
| 2590 | irq = desc->irq; | ||
| 2591 | cfg = desc->chip_data; | ||
| 2592 | |||
| 2593 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
| 2594 | __eoi_ioapic_irq(irq, cfg); | ||
| 2595 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2426 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
| 2596 | } | 2427 | } |
| 2597 | 2428 | ||
| 2598 | static void ack_apic_level(unsigned int irq) | 2429 | static void ack_apic_level(struct irq_data *data) |
| 2599 | { | 2430 | { |
| 2431 | struct irq_cfg *cfg = data->chip_data; | ||
| 2432 | int i, do_unmask_irq = 0, irq = data->irq; | ||
| 2600 | struct irq_desc *desc = irq_to_desc(irq); | 2433 | struct irq_desc *desc = irq_to_desc(irq); |
| 2601 | unsigned long v; | 2434 | unsigned long v; |
| 2602 | int i; | ||
| 2603 | struct irq_cfg *cfg; | ||
| 2604 | int do_unmask_irq = 0; | ||
| 2605 | 2435 | ||
| 2606 | irq_complete_move(&desc); | 2436 | irq_complete_move(cfg); |
| 2607 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2437 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 2608 | /* If we are moving the irq we need to mask it */ | 2438 | /* If we are moving the irq we need to mask it */ |
| 2609 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { | 2439 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { |
| 2610 | do_unmask_irq = 1; | 2440 | do_unmask_irq = 1; |
| 2611 | mask_IO_APIC_irq_desc(desc); | 2441 | mask_ioapic(cfg); |
| 2612 | } | 2442 | } |
| 2613 | #endif | 2443 | #endif |
| 2614 | 2444 | ||
| @@ -2644,7 +2474,6 @@ static void ack_apic_level(unsigned int irq) | |||
| 2644 | * we use the above logic (mask+edge followed by unmask+level) from | 2474 | * we use the above logic (mask+edge followed by unmask+level) from |
| 2645 | * Manfred Spraul to clear the remote IRR. | 2475 | * Manfred Spraul to clear the remote IRR. |
| 2646 | */ | 2476 | */ |
| 2647 | cfg = desc->chip_data; | ||
| 2648 | i = cfg->vector; | 2477 | i = cfg->vector; |
| 2649 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2478 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
| 2650 | 2479 | ||
| @@ -2664,7 +2493,7 @@ static void ack_apic_level(unsigned int irq) | |||
| 2664 | if (!(v & (1 << (i & 0x1f)))) { | 2493 | if (!(v & (1 << (i & 0x1f)))) { |
| 2665 | atomic_inc(&irq_mis_count); | 2494 | atomic_inc(&irq_mis_count); |
| 2666 | 2495 | ||
| 2667 | eoi_ioapic_irq(desc); | 2496 | eoi_ioapic_irq(irq, cfg); |
| 2668 | } | 2497 | } |
| 2669 | 2498 | ||
| 2670 | /* Now we can move and renable the irq */ | 2499 | /* Now we can move and renable the irq */ |
| @@ -2695,61 +2524,57 @@ static void ack_apic_level(unsigned int irq) | |||
| 2695 | * accurate and is causing problems then it is a hardware bug | 2524 | * accurate and is causing problems then it is a hardware bug |
| 2696 | * and you can go talk to the chipset vendor about it. | 2525 | * and you can go talk to the chipset vendor about it. |
| 2697 | */ | 2526 | */ |
| 2698 | cfg = desc->chip_data; | ||
| 2699 | if (!io_apic_level_ack_pending(cfg)) | 2527 | if (!io_apic_level_ack_pending(cfg)) |
| 2700 | move_masked_irq(irq); | 2528 | move_masked_irq(irq); |
| 2701 | unmask_IO_APIC_irq_desc(desc); | 2529 | unmask_ioapic(cfg); |
| 2702 | } | 2530 | } |
| 2703 | } | 2531 | } |
| 2704 | 2532 | ||
| 2705 | #ifdef CONFIG_INTR_REMAP | 2533 | #ifdef CONFIG_INTR_REMAP |
| 2706 | static void ir_ack_apic_edge(unsigned int irq) | 2534 | static void ir_ack_apic_edge(struct irq_data *data) |
| 2707 | { | 2535 | { |
| 2708 | ack_APIC_irq(); | 2536 | ack_APIC_irq(); |
| 2709 | } | 2537 | } |
| 2710 | 2538 | ||
| 2711 | static void ir_ack_apic_level(unsigned int irq) | 2539 | static void ir_ack_apic_level(struct irq_data *data) |
| 2712 | { | 2540 | { |
| 2713 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 2714 | |||
| 2715 | ack_APIC_irq(); | 2541 | ack_APIC_irq(); |
| 2716 | eoi_ioapic_irq(desc); | 2542 | eoi_ioapic_irq(data->irq, data->chip_data); |
| 2717 | } | 2543 | } |
| 2718 | #endif /* CONFIG_INTR_REMAP */ | 2544 | #endif /* CONFIG_INTR_REMAP */ |
| 2719 | 2545 | ||
| 2720 | static struct irq_chip ioapic_chip __read_mostly = { | 2546 | static struct irq_chip ioapic_chip __read_mostly = { |
| 2721 | .name = "IO-APIC", | 2547 | .name = "IO-APIC", |
| 2722 | .startup = startup_ioapic_irq, | 2548 | .irq_startup = startup_ioapic_irq, |
| 2723 | .mask = mask_IO_APIC_irq, | 2549 | .irq_mask = mask_ioapic_irq, |
| 2724 | .unmask = unmask_IO_APIC_irq, | 2550 | .irq_unmask = unmask_ioapic_irq, |
| 2725 | .ack = ack_apic_edge, | 2551 | .irq_ack = ack_apic_edge, |
| 2726 | .eoi = ack_apic_level, | 2552 | .irq_eoi = ack_apic_level, |
| 2727 | #ifdef CONFIG_SMP | 2553 | #ifdef CONFIG_SMP |
| 2728 | .set_affinity = set_ioapic_affinity_irq, | 2554 | .irq_set_affinity = ioapic_set_affinity, |
| 2729 | #endif | 2555 | #endif |
| 2730 | .retrigger = ioapic_retrigger_irq, | 2556 | .irq_retrigger = ioapic_retrigger_irq, |
| 2731 | }; | 2557 | }; |
| 2732 | 2558 | ||
| 2733 | static struct irq_chip ir_ioapic_chip __read_mostly = { | 2559 | static struct irq_chip ir_ioapic_chip __read_mostly = { |
| 2734 | .name = "IR-IO-APIC", | 2560 | .name = "IR-IO-APIC", |
| 2735 | .startup = startup_ioapic_irq, | 2561 | .irq_startup = startup_ioapic_irq, |
| 2736 | .mask = mask_IO_APIC_irq, | 2562 | .irq_mask = mask_ioapic_irq, |
| 2737 | .unmask = unmask_IO_APIC_irq, | 2563 | .irq_unmask = unmask_ioapic_irq, |
| 2738 | #ifdef CONFIG_INTR_REMAP | 2564 | #ifdef CONFIG_INTR_REMAP |
| 2739 | .ack = ir_ack_apic_edge, | 2565 | .irq_ack = ir_ack_apic_edge, |
| 2740 | .eoi = ir_ack_apic_level, | 2566 | .irq_eoi = ir_ack_apic_level, |
| 2741 | #ifdef CONFIG_SMP | 2567 | #ifdef CONFIG_SMP |
| 2742 | .set_affinity = set_ir_ioapic_affinity_irq, | 2568 | .irq_set_affinity = ir_ioapic_set_affinity, |
| 2743 | #endif | 2569 | #endif |
| 2744 | #endif | 2570 | #endif |
| 2745 | .retrigger = ioapic_retrigger_irq, | 2571 | .irq_retrigger = ioapic_retrigger_irq, |
| 2746 | }; | 2572 | }; |
| 2747 | 2573 | ||
| 2748 | static inline void init_IO_APIC_traps(void) | 2574 | static inline void init_IO_APIC_traps(void) |
| 2749 | { | 2575 | { |
| 2750 | int irq; | ||
| 2751 | struct irq_desc *desc; | ||
| 2752 | struct irq_cfg *cfg; | 2576 | struct irq_cfg *cfg; |
| 2577 | unsigned int irq; | ||
| 2753 | 2578 | ||
| 2754 | /* | 2579 | /* |
| 2755 | * NOTE! The local APIC isn't very good at handling | 2580 | * NOTE! The local APIC isn't very good at handling |
| @@ -2762,8 +2587,8 @@ static inline void init_IO_APIC_traps(void) | |||
| 2762 | * Also, we've got to be careful not to trash gate | 2587 | * Also, we've got to be careful not to trash gate |
| 2763 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2588 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
| 2764 | */ | 2589 | */ |
| 2765 | for_each_irq_desc(irq, desc) { | 2590 | for_each_active_irq(irq) { |
| 2766 | cfg = desc->chip_data; | 2591 | cfg = get_irq_chip_data(irq); |
| 2767 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2592 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
| 2768 | /* | 2593 | /* |
| 2769 | * Hmm.. We don't have an entry for this, | 2594 | * Hmm.. We don't have an entry for this, |
| @@ -2774,7 +2599,7 @@ static inline void init_IO_APIC_traps(void) | |||
| 2774 | legacy_pic->make_irq(irq); | 2599 | legacy_pic->make_irq(irq); |
| 2775 | else | 2600 | else |
| 2776 | /* Strange. Oh, well.. */ | 2601 | /* Strange. Oh, well.. */ |
| 2777 | desc->chip = &no_irq_chip; | 2602 | set_irq_chip(irq, &no_irq_chip); |
| 2778 | } | 2603 | } |
| 2779 | } | 2604 | } |
| 2780 | } | 2605 | } |
| @@ -2783,7 +2608,7 @@ static inline void init_IO_APIC_traps(void) | |||
| 2783 | * The local APIC irq-chip implementation: | 2608 | * The local APIC irq-chip implementation: |
| 2784 | */ | 2609 | */ |
| 2785 | 2610 | ||
| 2786 | static void mask_lapic_irq(unsigned int irq) | 2611 | static void mask_lapic_irq(struct irq_data *data) |
| 2787 | { | 2612 | { |
| 2788 | unsigned long v; | 2613 | unsigned long v; |
| 2789 | 2614 | ||
| @@ -2791,7 +2616,7 @@ static void mask_lapic_irq(unsigned int irq) | |||
| 2791 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | 2616 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); |
| 2792 | } | 2617 | } |
| 2793 | 2618 | ||
| 2794 | static void unmask_lapic_irq(unsigned int irq) | 2619 | static void unmask_lapic_irq(struct irq_data *data) |
| 2795 | { | 2620 | { |
| 2796 | unsigned long v; | 2621 | unsigned long v; |
| 2797 | 2622 | ||
| @@ -2799,21 +2624,21 @@ static void unmask_lapic_irq(unsigned int irq) | |||
| 2799 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); | 2624 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); |
| 2800 | } | 2625 | } |
| 2801 | 2626 | ||
| 2802 | static void ack_lapic_irq(unsigned int irq) | 2627 | static void ack_lapic_irq(struct irq_data *data) |
| 2803 | { | 2628 | { |
| 2804 | ack_APIC_irq(); | 2629 | ack_APIC_irq(); |
| 2805 | } | 2630 | } |
| 2806 | 2631 | ||
| 2807 | static struct irq_chip lapic_chip __read_mostly = { | 2632 | static struct irq_chip lapic_chip __read_mostly = { |
| 2808 | .name = "local-APIC", | 2633 | .name = "local-APIC", |
| 2809 | .mask = mask_lapic_irq, | 2634 | .irq_mask = mask_lapic_irq, |
| 2810 | .unmask = unmask_lapic_irq, | 2635 | .irq_unmask = unmask_lapic_irq, |
| 2811 | .ack = ack_lapic_irq, | 2636 | .irq_ack = ack_lapic_irq, |
| 2812 | }; | 2637 | }; |
| 2813 | 2638 | ||
| 2814 | static void lapic_register_intr(int irq, struct irq_desc *desc) | 2639 | static void lapic_register_intr(int irq) |
| 2815 | { | 2640 | { |
| 2816 | desc->status &= ~IRQ_LEVEL; | 2641 | irq_clear_status_flags(irq, IRQ_LEVEL); |
| 2817 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2642 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
| 2818 | "edge"); | 2643 | "edge"); |
| 2819 | } | 2644 | } |
| @@ -2916,8 +2741,7 @@ int timer_through_8259 __initdata; | |||
| 2916 | */ | 2741 | */ |
| 2917 | static inline void __init check_timer(void) | 2742 | static inline void __init check_timer(void) |
| 2918 | { | 2743 | { |
| 2919 | struct irq_desc *desc = irq_to_desc(0); | 2744 | struct irq_cfg *cfg = get_irq_chip_data(0); |
| 2920 | struct irq_cfg *cfg = desc->chip_data; | ||
| 2921 | int node = cpu_to_node(0); | 2745 | int node = cpu_to_node(0); |
| 2922 | int apic1, pin1, apic2, pin2; | 2746 | int apic1, pin1, apic2, pin2; |
| 2923 | unsigned long flags; | 2747 | unsigned long flags; |
| @@ -2928,7 +2752,7 @@ static inline void __init check_timer(void) | |||
| 2928 | /* | 2752 | /* |
| 2929 | * get/set the timer IRQ vector: | 2753 | * get/set the timer IRQ vector: |
| 2930 | */ | 2754 | */ |
| 2931 | legacy_pic->chip->mask(0); | 2755 | legacy_pic->mask(0); |
| 2932 | assign_irq_vector(0, cfg, apic->target_cpus()); | 2756 | assign_irq_vector(0, cfg, apic->target_cpus()); |
| 2933 | 2757 | ||
| 2934 | /* | 2758 | /* |
| @@ -2987,7 +2811,7 @@ static inline void __init check_timer(void) | |||
| 2987 | add_pin_to_irq_node(cfg, node, apic1, pin1); | 2811 | add_pin_to_irq_node(cfg, node, apic1, pin1); |
| 2988 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2812 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
| 2989 | } else { | 2813 | } else { |
| 2990 | /* for edge trigger, setup_IO_APIC_irq already | 2814 | /* for edge trigger, setup_ioapic_irq already |
| 2991 | * leave it unmasked. | 2815 | * leave it unmasked. |
| 2992 | * so only need to unmask if it is level-trigger | 2816 | * so only need to unmask if it is level-trigger |
| 2993 | * do we really have level trigger timer? | 2817 | * do we really have level trigger timer? |
| @@ -2995,12 +2819,12 @@ static inline void __init check_timer(void) | |||
| 2995 | int idx; | 2819 | int idx; |
| 2996 | idx = find_irq_entry(apic1, pin1, mp_INT); | 2820 | idx = find_irq_entry(apic1, pin1, mp_INT); |
| 2997 | if (idx != -1 && irq_trigger(idx)) | 2821 | if (idx != -1 && irq_trigger(idx)) |
| 2998 | unmask_IO_APIC_irq_desc(desc); | 2822 | unmask_ioapic(cfg); |
| 2999 | } | 2823 | } |
| 3000 | if (timer_irq_works()) { | 2824 | if (timer_irq_works()) { |
| 3001 | if (nmi_watchdog == NMI_IO_APIC) { | 2825 | if (nmi_watchdog == NMI_IO_APIC) { |
| 3002 | setup_nmi(); | 2826 | setup_nmi(); |
| 3003 | legacy_pic->chip->unmask(0); | 2827 | legacy_pic->unmask(0); |
| 3004 | } | 2828 | } |
| 3005 | if (disable_timer_pin_1 > 0) | 2829 | if (disable_timer_pin_1 > 0) |
| 3006 | clear_IO_APIC_pin(0, pin1); | 2830 | clear_IO_APIC_pin(0, pin1); |
| @@ -3023,14 +2847,14 @@ static inline void __init check_timer(void) | |||
| 3023 | */ | 2847 | */ |
| 3024 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); | 2848 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); |
| 3025 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 2849 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
| 3026 | legacy_pic->chip->unmask(0); | 2850 | legacy_pic->unmask(0); |
| 3027 | if (timer_irq_works()) { | 2851 | if (timer_irq_works()) { |
| 3028 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); | 2852 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); |
| 3029 | timer_through_8259 = 1; | 2853 | timer_through_8259 = 1; |
| 3030 | if (nmi_watchdog == NMI_IO_APIC) { | 2854 | if (nmi_watchdog == NMI_IO_APIC) { |
| 3031 | legacy_pic->chip->mask(0); | 2855 | legacy_pic->mask(0); |
| 3032 | setup_nmi(); | 2856 | setup_nmi(); |
| 3033 | legacy_pic->chip->unmask(0); | 2857 | legacy_pic->unmask(0); |
| 3034 | } | 2858 | } |
| 3035 | goto out; | 2859 | goto out; |
| 3036 | } | 2860 | } |
| @@ -3038,7 +2862,7 @@ static inline void __init check_timer(void) | |||
| 3038 | * Cleanup, just in case ... | 2862 | * Cleanup, just in case ... |
| 3039 | */ | 2863 | */ |
| 3040 | local_irq_disable(); | 2864 | local_irq_disable(); |
| 3041 | legacy_pic->chip->mask(0); | 2865 | legacy_pic->mask(0); |
| 3042 | clear_IO_APIC_pin(apic2, pin2); | 2866 | clear_IO_APIC_pin(apic2, pin2); |
| 3043 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); | 2867 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); |
| 3044 | } | 2868 | } |
| @@ -3055,16 +2879,16 @@ static inline void __init check_timer(void) | |||
| 3055 | apic_printk(APIC_QUIET, KERN_INFO | 2879 | apic_printk(APIC_QUIET, KERN_INFO |
| 3056 | "...trying to set up timer as Virtual Wire IRQ...\n"); | 2880 | "...trying to set up timer as Virtual Wire IRQ...\n"); |
| 3057 | 2881 | ||
| 3058 | lapic_register_intr(0, desc); | 2882 | lapic_register_intr(0); |
| 3059 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ | 2883 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ |
| 3060 | legacy_pic->chip->unmask(0); | 2884 | legacy_pic->unmask(0); |
| 3061 | 2885 | ||
| 3062 | if (timer_irq_works()) { | 2886 | if (timer_irq_works()) { |
| 3063 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | 2887 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); |
| 3064 | goto out; | 2888 | goto out; |
| 3065 | } | 2889 | } |
| 3066 | local_irq_disable(); | 2890 | local_irq_disable(); |
| 3067 | legacy_pic->chip->mask(0); | 2891 | legacy_pic->mask(0); |
| 3068 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); | 2892 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); |
| 3069 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); | 2893 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); |
| 3070 | 2894 | ||
| @@ -3230,44 +3054,37 @@ device_initcall(ioapic_init_sysfs); | |||
| 3230 | /* | 3054 | /* |
| 3231 | * Dynamic irq allocate and deallocation | 3055 | * Dynamic irq allocate and deallocation |
| 3232 | */ | 3056 | */ |
| 3233 | unsigned int create_irq_nr(unsigned int irq_want, int node) | 3057 | unsigned int create_irq_nr(unsigned int from, int node) |
| 3234 | { | 3058 | { |
| 3235 | /* Allocate an unused irq */ | 3059 | struct irq_cfg *cfg; |
| 3236 | unsigned int irq; | ||
| 3237 | unsigned int new; | ||
| 3238 | unsigned long flags; | 3060 | unsigned long flags; |
| 3239 | struct irq_cfg *cfg_new = NULL; | 3061 | unsigned int ret = 0; |
| 3240 | struct irq_desc *desc_new = NULL; | 3062 | int irq; |
| 3241 | |||
| 3242 | irq = 0; | ||
| 3243 | if (irq_want < nr_irqs_gsi) | ||
| 3244 | irq_want = nr_irqs_gsi; | ||
| 3245 | |||
| 3246 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
| 3247 | for (new = irq_want; new < nr_irqs; new++) { | ||
| 3248 | desc_new = irq_to_desc_alloc_node(new, node); | ||
| 3249 | if (!desc_new) { | ||
| 3250 | printk(KERN_INFO "can not get irq_desc for %d\n", new); | ||
| 3251 | continue; | ||
| 3252 | } | ||
| 3253 | cfg_new = desc_new->chip_data; | ||
| 3254 | |||
| 3255 | if (cfg_new->vector != 0) | ||
| 3256 | continue; | ||
| 3257 | 3063 | ||
| 3258 | desc_new = move_irq_desc(desc_new, node); | 3064 | if (from < nr_irqs_gsi) |
| 3259 | cfg_new = desc_new->chip_data; | 3065 | from = nr_irqs_gsi; |
| 3260 | 3066 | ||
| 3261 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3067 | irq = alloc_irq_from(from, node); |
| 3262 | irq = new; | 3068 | if (irq < 0) |
| 3263 | break; | 3069 | return 0; |
| 3070 | cfg = alloc_irq_cfg(irq, node); | ||
| 3071 | if (!cfg) { | ||
| 3072 | free_irq_at(irq, NULL); | ||
| 3073 | return 0; | ||
| 3264 | } | 3074 | } |
| 3265 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
| 3266 | 3075 | ||
| 3267 | if (irq > 0) | 3076 | raw_spin_lock_irqsave(&vector_lock, flags); |
| 3268 | dynamic_irq_init_keep_chip_data(irq); | 3077 | if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) |
| 3078 | ret = irq; | ||
| 3079 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
| 3269 | 3080 | ||
| 3270 | return irq; | 3081 | if (ret) { |
| 3082 | set_irq_chip_data(irq, cfg); | ||
| 3083 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | ||
| 3084 | } else { | ||
| 3085 | free_irq_at(irq, cfg); | ||
| 3086 | } | ||
| 3087 | return ret; | ||
| 3271 | } | 3088 | } |
| 3272 | 3089 | ||
| 3273 | int create_irq(void) | 3090 | int create_irq(void) |
| @@ -3287,14 +3104,17 @@ int create_irq(void) | |||
| 3287 | 3104 | ||
| 3288 | void destroy_irq(unsigned int irq) | 3105 | void destroy_irq(unsigned int irq) |
| 3289 | { | 3106 | { |
| 3107 | struct irq_cfg *cfg = get_irq_chip_data(irq); | ||
| 3290 | unsigned long flags; | 3108 | unsigned long flags; |
| 3291 | 3109 | ||
| 3292 | dynamic_irq_cleanup_keep_chip_data(irq); | 3110 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
| 3293 | 3111 | ||
| 3294 | free_irte(irq); | 3112 | if (intr_remapping_enabled) |
| 3113 | free_irte(irq); | ||
| 3295 | raw_spin_lock_irqsave(&vector_lock, flags); | 3114 | raw_spin_lock_irqsave(&vector_lock, flags); |
| 3296 | __clear_irq_vector(irq, get_irq_chip_data(irq)); | 3115 | __clear_irq_vector(irq, cfg); |
| 3297 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3116 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
| 3117 | free_irq_at(irq, cfg); | ||
| 3298 | } | 3118 | } |
| 3299 | 3119 | ||
| 3300 | /* | 3120 | /* |
| @@ -3318,7 +3138,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
| 3318 | 3138 | ||
| 3319 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3139 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
| 3320 | 3140 | ||
| 3321 | if (irq_remapped(irq)) { | 3141 | if (irq_remapped(get_irq_chip_data(irq))) { |
| 3322 | struct irte irte; | 3142 | struct irte irte; |
| 3323 | int ir_index; | 3143 | int ir_index; |
| 3324 | u16 sub_handle; | 3144 | u16 sub_handle; |
| @@ -3371,26 +3191,24 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
| 3371 | } | 3191 | } |
| 3372 | 3192 | ||
| 3373 | #ifdef CONFIG_SMP | 3193 | #ifdef CONFIG_SMP |
| 3374 | static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3194 | static int |
| 3195 | msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
| 3375 | { | 3196 | { |
| 3376 | struct irq_desc *desc = irq_to_desc(irq); | 3197 | struct irq_cfg *cfg = data->chip_data; |
| 3377 | struct irq_cfg *cfg; | ||
| 3378 | struct msi_msg msg; | 3198 | struct msi_msg msg; |
| 3379 | unsigned int dest; | 3199 | unsigned int dest; |
| 3380 | 3200 | ||
| 3381 | if (set_desc_affinity(desc, mask, &dest)) | 3201 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 3382 | return -1; | 3202 | return -1; |
| 3383 | 3203 | ||
| 3384 | cfg = desc->chip_data; | 3204 | __get_cached_msi_msg(data->msi_desc, &msg); |
| 3385 | |||
| 3386 | get_cached_msi_msg_desc(desc, &msg); | ||
| 3387 | 3205 | ||
| 3388 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3206 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
| 3389 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3207 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
| 3390 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3208 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
| 3391 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3209 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
| 3392 | 3210 | ||
| 3393 | write_msi_msg_desc(desc, &msg); | 3211 | __write_msi_msg(data->msi_desc, &msg); |
| 3394 | 3212 | ||
| 3395 | return 0; | 3213 | return 0; |
| 3396 | } | 3214 | } |
| @@ -3400,17 +3218,17 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 3400 | * done in the process context using interrupt-remapping hardware. | 3218 | * done in the process context using interrupt-remapping hardware. |
| 3401 | */ | 3219 | */ |
| 3402 | static int | 3220 | static int |
| 3403 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3221 | ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 3222 | bool force) | ||
| 3404 | { | 3223 | { |
| 3405 | struct irq_desc *desc = irq_to_desc(irq); | 3224 | struct irq_cfg *cfg = data->chip_data; |
| 3406 | struct irq_cfg *cfg = desc->chip_data; | 3225 | unsigned int dest, irq = data->irq; |
| 3407 | unsigned int dest; | ||
| 3408 | struct irte irte; | 3226 | struct irte irte; |
| 3409 | 3227 | ||
| 3410 | if (get_irte(irq, &irte)) | 3228 | if (get_irte(irq, &irte)) |
| 3411 | return -1; | 3229 | return -1; |
| 3412 | 3230 | ||
| 3413 | if (set_desc_affinity(desc, mask, &dest)) | 3231 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 3414 | return -1; | 3232 | return -1; |
| 3415 | 3233 | ||
| 3416 | irte.vector = cfg->vector; | 3234 | irte.vector = cfg->vector; |
| @@ -3440,27 +3258,27 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 3440 | * which implement the MSI or MSI-X Capability Structure. | 3258 | * which implement the MSI or MSI-X Capability Structure. |
| 3441 | */ | 3259 | */ |
| 3442 | static struct irq_chip msi_chip = { | 3260 | static struct irq_chip msi_chip = { |
| 3443 | .name = "PCI-MSI", | 3261 | .name = "PCI-MSI", |
| 3444 | .unmask = unmask_msi_irq, | 3262 | .irq_unmask = unmask_msi_irq, |
| 3445 | .mask = mask_msi_irq, | 3263 | .irq_mask = mask_msi_irq, |
| 3446 | .ack = ack_apic_edge, | 3264 | .irq_ack = ack_apic_edge, |
| 3447 | #ifdef CONFIG_SMP | 3265 | #ifdef CONFIG_SMP |
| 3448 | .set_affinity = set_msi_irq_affinity, | 3266 | .irq_set_affinity = msi_set_affinity, |
| 3449 | #endif | 3267 | #endif |
| 3450 | .retrigger = ioapic_retrigger_irq, | 3268 | .irq_retrigger = ioapic_retrigger_irq, |
| 3451 | }; | 3269 | }; |
| 3452 | 3270 | ||
| 3453 | static struct irq_chip msi_ir_chip = { | 3271 | static struct irq_chip msi_ir_chip = { |
| 3454 | .name = "IR-PCI-MSI", | 3272 | .name = "IR-PCI-MSI", |
| 3455 | .unmask = unmask_msi_irq, | 3273 | .irq_unmask = unmask_msi_irq, |
| 3456 | .mask = mask_msi_irq, | 3274 | .irq_mask = mask_msi_irq, |
| 3457 | #ifdef CONFIG_INTR_REMAP | 3275 | #ifdef CONFIG_INTR_REMAP |
| 3458 | .ack = ir_ack_apic_edge, | 3276 | .irq_ack = ir_ack_apic_edge, |
| 3459 | #ifdef CONFIG_SMP | 3277 | #ifdef CONFIG_SMP |
| 3460 | .set_affinity = ir_set_msi_irq_affinity, | 3278 | .irq_set_affinity = ir_msi_set_affinity, |
| 3461 | #endif | 3279 | #endif |
| 3462 | #endif | 3280 | #endif |
| 3463 | .retrigger = ioapic_retrigger_irq, | 3281 | .irq_retrigger = ioapic_retrigger_irq, |
| 3464 | }; | 3282 | }; |
| 3465 | 3283 | ||
| 3466 | /* | 3284 | /* |
| @@ -3492,8 +3310,8 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
| 3492 | 3310 | ||
| 3493 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3311 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
| 3494 | { | 3312 | { |
| 3495 | int ret; | ||
| 3496 | struct msi_msg msg; | 3313 | struct msi_msg msg; |
| 3314 | int ret; | ||
| 3497 | 3315 | ||
| 3498 | ret = msi_compose_msg(dev, irq, &msg, -1); | 3316 | ret = msi_compose_msg(dev, irq, &msg, -1); |
| 3499 | if (ret < 0) | 3317 | if (ret < 0) |
| @@ -3502,12 +3320,8 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
| 3502 | set_irq_msi(irq, msidesc); | 3320 | set_irq_msi(irq, msidesc); |
| 3503 | write_msi_msg(irq, &msg); | 3321 | write_msi_msg(irq, &msg); |
| 3504 | 3322 | ||
| 3505 | if (irq_remapped(irq)) { | 3323 | if (irq_remapped(get_irq_chip_data(irq))) { |
| 3506 | struct irq_desc *desc = irq_to_desc(irq); | 3324 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 3507 | /* | ||
| 3508 | * irq migration in process context | ||
| 3509 | */ | ||
| 3510 | desc->status |= IRQ_MOVE_PCNTXT; | ||
| 3511 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3325 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); |
| 3512 | } else | 3326 | } else |
| 3513 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3327 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); |
| @@ -3519,13 +3333,10 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
| 3519 | 3333 | ||
| 3520 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3334 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
| 3521 | { | 3335 | { |
| 3522 | unsigned int irq; | 3336 | int node, ret, sub_handle, index = 0; |
| 3523 | int ret, sub_handle; | 3337 | unsigned int irq, irq_want; |
| 3524 | struct msi_desc *msidesc; | 3338 | struct msi_desc *msidesc; |
| 3525 | unsigned int irq_want; | ||
| 3526 | struct intel_iommu *iommu = NULL; | 3339 | struct intel_iommu *iommu = NULL; |
| 3527 | int index = 0; | ||
| 3528 | int node; | ||
| 3529 | 3340 | ||
| 3530 | /* x86 doesn't support multiple MSI yet */ | 3341 | /* x86 doesn't support multiple MSI yet */ |
| 3531 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3342 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
| @@ -3585,18 +3396,17 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
| 3585 | 3396 | ||
| 3586 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) | 3397 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) |
| 3587 | #ifdef CONFIG_SMP | 3398 | #ifdef CONFIG_SMP |
| 3588 | static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3399 | static int |
| 3400 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
| 3401 | bool force) | ||
| 3589 | { | 3402 | { |
| 3590 | struct irq_desc *desc = irq_to_desc(irq); | 3403 | struct irq_cfg *cfg = data->chip_data; |
| 3591 | struct irq_cfg *cfg; | 3404 | unsigned int dest, irq = data->irq; |
| 3592 | struct msi_msg msg; | 3405 | struct msi_msg msg; |
| 3593 | unsigned int dest; | ||
| 3594 | 3406 | ||
| 3595 | if (set_desc_affinity(desc, mask, &dest)) | 3407 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 3596 | return -1; | 3408 | return -1; |
| 3597 | 3409 | ||
| 3598 | cfg = desc->chip_data; | ||
| 3599 | |||
| 3600 | dmar_msi_read(irq, &msg); | 3410 | dmar_msi_read(irq, &msg); |
| 3601 | 3411 | ||
| 3602 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3412 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
| @@ -3612,14 +3422,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 3612 | #endif /* CONFIG_SMP */ | 3422 | #endif /* CONFIG_SMP */ |
| 3613 | 3423 | ||
| 3614 | static struct irq_chip dmar_msi_type = { | 3424 | static struct irq_chip dmar_msi_type = { |
| 3615 | .name = "DMAR_MSI", | 3425 | .name = "DMAR_MSI", |
| 3616 | .unmask = dmar_msi_unmask, | 3426 | .irq_unmask = dmar_msi_unmask, |
| 3617 | .mask = dmar_msi_mask, | 3427 | .irq_mask = dmar_msi_mask, |
| 3618 | .ack = ack_apic_edge, | 3428 | .irq_ack = ack_apic_edge, |
| 3619 | #ifdef CONFIG_SMP | 3429 | #ifdef CONFIG_SMP |
| 3620 | .set_affinity = dmar_msi_set_affinity, | 3430 | .irq_set_affinity = dmar_msi_set_affinity, |
| 3621 | #endif | 3431 | #endif |
| 3622 | .retrigger = ioapic_retrigger_irq, | 3432 | .irq_retrigger = ioapic_retrigger_irq, |
| 3623 | }; | 3433 | }; |
| 3624 | 3434 | ||
| 3625 | int arch_setup_dmar_msi(unsigned int irq) | 3435 | int arch_setup_dmar_msi(unsigned int irq) |
| @@ -3640,26 +3450,24 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
| 3640 | #ifdef CONFIG_HPET_TIMER | 3450 | #ifdef CONFIG_HPET_TIMER |
| 3641 | 3451 | ||
| 3642 | #ifdef CONFIG_SMP | 3452 | #ifdef CONFIG_SMP |
| 3643 | static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3453 | static int hpet_msi_set_affinity(struct irq_data *data, |
| 3454 | const struct cpumask *mask, bool force) | ||
| 3644 | { | 3455 | { |
| 3645 | struct irq_desc *desc = irq_to_desc(irq); | 3456 | struct irq_cfg *cfg = data->chip_data; |
| 3646 | struct irq_cfg *cfg; | ||
| 3647 | struct msi_msg msg; | 3457 | struct msi_msg msg; |
| 3648 | unsigned int dest; | 3458 | unsigned int dest; |
| 3649 | 3459 | ||
| 3650 | if (set_desc_affinity(desc, mask, &dest)) | 3460 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 3651 | return -1; | 3461 | return -1; |
| 3652 | 3462 | ||
| 3653 | cfg = desc->chip_data; | 3463 | hpet_msi_read(data->handler_data, &msg); |
| 3654 | |||
| 3655 | hpet_msi_read(irq, &msg); | ||
| 3656 | 3464 | ||
| 3657 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3465 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
| 3658 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3466 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
| 3659 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3467 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
| 3660 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3468 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
| 3661 | 3469 | ||
| 3662 | hpet_msi_write(irq, &msg); | 3470 | hpet_msi_write(data->handler_data, &msg); |
| 3663 | 3471 | ||
| 3664 | return 0; | 3472 | return 0; |
| 3665 | } | 3473 | } |
| @@ -3667,34 +3475,33 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 3667 | #endif /* CONFIG_SMP */ | 3475 | #endif /* CONFIG_SMP */ |
| 3668 | 3476 | ||
| 3669 | static struct irq_chip ir_hpet_msi_type = { | 3477 | static struct irq_chip ir_hpet_msi_type = { |
| 3670 | .name = "IR-HPET_MSI", | 3478 | .name = "IR-HPET_MSI", |
| 3671 | .unmask = hpet_msi_unmask, | 3479 | .irq_unmask = hpet_msi_unmask, |
| 3672 | .mask = hpet_msi_mask, | 3480 | .irq_mask = hpet_msi_mask, |
| 3673 | #ifdef CONFIG_INTR_REMAP | 3481 | #ifdef CONFIG_INTR_REMAP |
| 3674 | .ack = ir_ack_apic_edge, | 3482 | .irq_ack = ir_ack_apic_edge, |
| 3675 | #ifdef CONFIG_SMP | 3483 | #ifdef CONFIG_SMP |
| 3676 | .set_affinity = ir_set_msi_irq_affinity, | 3484 | .irq_set_affinity = ir_msi_set_affinity, |
| 3677 | #endif | 3485 | #endif |
| 3678 | #endif | 3486 | #endif |
| 3679 | .retrigger = ioapic_retrigger_irq, | 3487 | .irq_retrigger = ioapic_retrigger_irq, |
| 3680 | }; | 3488 | }; |
| 3681 | 3489 | ||
| 3682 | static struct irq_chip hpet_msi_type = { | 3490 | static struct irq_chip hpet_msi_type = { |
| 3683 | .name = "HPET_MSI", | 3491 | .name = "HPET_MSI", |
| 3684 | .unmask = hpet_msi_unmask, | 3492 | .irq_unmask = hpet_msi_unmask, |
| 3685 | .mask = hpet_msi_mask, | 3493 | .irq_mask = hpet_msi_mask, |
| 3686 | .ack = ack_apic_edge, | 3494 | .irq_ack = ack_apic_edge, |
| 3687 | #ifdef CONFIG_SMP | 3495 | #ifdef CONFIG_SMP |
| 3688 | .set_affinity = hpet_msi_set_affinity, | 3496 | .irq_set_affinity = hpet_msi_set_affinity, |
| 3689 | #endif | 3497 | #endif |
| 3690 | .retrigger = ioapic_retrigger_irq, | 3498 | .irq_retrigger = ioapic_retrigger_irq, |
| 3691 | }; | 3499 | }; |
| 3692 | 3500 | ||
| 3693 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3501 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
| 3694 | { | 3502 | { |
| 3695 | int ret; | ||
| 3696 | struct msi_msg msg; | 3503 | struct msi_msg msg; |
| 3697 | struct irq_desc *desc = irq_to_desc(irq); | 3504 | int ret; |
| 3698 | 3505 | ||
| 3699 | if (intr_remapping_enabled) { | 3506 | if (intr_remapping_enabled) { |
| 3700 | struct intel_iommu *iommu = map_hpet_to_ir(id); | 3507 | struct intel_iommu *iommu = map_hpet_to_ir(id); |
| @@ -3712,9 +3519,9 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
| 3712 | if (ret < 0) | 3519 | if (ret < 0) |
| 3713 | return ret; | 3520 | return ret; |
| 3714 | 3521 | ||
| 3715 | hpet_msi_write(irq, &msg); | 3522 | hpet_msi_write(get_irq_data(irq), &msg); |
| 3716 | desc->status |= IRQ_MOVE_PCNTXT; | 3523 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 3717 | if (irq_remapped(irq)) | 3524 | if (irq_remapped(get_irq_chip_data(irq))) |
| 3718 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3525 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, |
| 3719 | handle_edge_irq, "edge"); | 3526 | handle_edge_irq, "edge"); |
| 3720 | else | 3527 | else |
| @@ -3747,33 +3554,30 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
| 3747 | write_ht_irq_msg(irq, &msg); | 3554 | write_ht_irq_msg(irq, &msg); |
| 3748 | } | 3555 | } |
| 3749 | 3556 | ||
| 3750 | static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3557 | static int |
| 3558 | ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
| 3751 | { | 3559 | { |
| 3752 | struct irq_desc *desc = irq_to_desc(irq); | 3560 | struct irq_cfg *cfg = data->chip_data; |
| 3753 | struct irq_cfg *cfg; | ||
| 3754 | unsigned int dest; | 3561 | unsigned int dest; |
| 3755 | 3562 | ||
| 3756 | if (set_desc_affinity(desc, mask, &dest)) | 3563 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 3757 | return -1; | 3564 | return -1; |
| 3758 | 3565 | ||
| 3759 | cfg = desc->chip_data; | 3566 | target_ht_irq(data->irq, dest, cfg->vector); |
| 3760 | |||
| 3761 | target_ht_irq(irq, dest, cfg->vector); | ||
| 3762 | |||
| 3763 | return 0; | 3567 | return 0; |
| 3764 | } | 3568 | } |
| 3765 | 3569 | ||
| 3766 | #endif | 3570 | #endif |
| 3767 | 3571 | ||
| 3768 | static struct irq_chip ht_irq_chip = { | 3572 | static struct irq_chip ht_irq_chip = { |
| 3769 | .name = "PCI-HT", | 3573 | .name = "PCI-HT", |
| 3770 | .mask = mask_ht_irq, | 3574 | .irq_mask = mask_ht_irq, |
| 3771 | .unmask = unmask_ht_irq, | 3575 | .irq_unmask = unmask_ht_irq, |
| 3772 | .ack = ack_apic_edge, | 3576 | .irq_ack = ack_apic_edge, |
| 3773 | #ifdef CONFIG_SMP | 3577 | #ifdef CONFIG_SMP |
| 3774 | .set_affinity = set_ht_irq_affinity, | 3578 | .irq_set_affinity = ht_set_affinity, |
| 3775 | #endif | 3579 | #endif |
| 3776 | .retrigger = ioapic_retrigger_irq, | 3580 | .irq_retrigger = ioapic_retrigger_irq, |
| 3777 | }; | 3581 | }; |
| 3778 | 3582 | ||
| 3779 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 3583 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
| @@ -3864,14 +3668,13 @@ int __init arch_probe_nr_irqs(void) | |||
| 3864 | if (nr < nr_irqs) | 3668 | if (nr < nr_irqs) |
| 3865 | nr_irqs = nr; | 3669 | nr_irqs = nr; |
| 3866 | 3670 | ||
| 3867 | return 0; | 3671 | return NR_IRQS_LEGACY; |
| 3868 | } | 3672 | } |
| 3869 | #endif | 3673 | #endif |
| 3870 | 3674 | ||
| 3871 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3675 | static int __io_apic_set_pci_routing(struct device *dev, int irq, |
| 3872 | struct io_apic_irq_attr *irq_attr) | 3676 | struct io_apic_irq_attr *irq_attr) |
| 3873 | { | 3677 | { |
| 3874 | struct irq_desc *desc; | ||
| 3875 | struct irq_cfg *cfg; | 3678 | struct irq_cfg *cfg; |
| 3876 | int node; | 3679 | int node; |
| 3877 | int ioapic, pin; | 3680 | int ioapic, pin; |
| @@ -3889,11 +3692,9 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
| 3889 | else | 3692 | else |
| 3890 | node = cpu_to_node(0); | 3693 | node = cpu_to_node(0); |
| 3891 | 3694 | ||
| 3892 | desc = irq_to_desc_alloc_node(irq, node); | 3695 | cfg = alloc_irq_and_cfg_at(irq, node); |
| 3893 | if (!desc) { | 3696 | if (!cfg) |
| 3894 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
| 3895 | return 0; | 3697 | return 0; |
| 3896 | } | ||
| 3897 | 3698 | ||
| 3898 | pin = irq_attr->ioapic_pin; | 3699 | pin = irq_attr->ioapic_pin; |
| 3899 | trigger = irq_attr->trigger; | 3700 | trigger = irq_attr->trigger; |
| @@ -3903,15 +3704,14 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
| 3903 | * IRQs < 16 are already in the irq_2_pin[] map | 3704 | * IRQs < 16 are already in the irq_2_pin[] map |
| 3904 | */ | 3705 | */ |
| 3905 | if (irq >= legacy_pic->nr_legacy_irqs) { | 3706 | if (irq >= legacy_pic->nr_legacy_irqs) { |
| 3906 | cfg = desc->chip_data; | 3707 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { |
| 3907 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { | ||
| 3908 | printk(KERN_INFO "can not add pin %d for irq %d\n", | 3708 | printk(KERN_INFO "can not add pin %d for irq %d\n", |
| 3909 | pin, irq); | 3709 | pin, irq); |
| 3910 | return 0; | 3710 | return 0; |
| 3911 | } | 3711 | } |
| 3912 | } | 3712 | } |
| 3913 | 3713 | ||
| 3914 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3714 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); |
| 3915 | 3715 | ||
| 3916 | return 0; | 3716 | return 0; |
| 3917 | } | 3717 | } |
| @@ -4104,14 +3904,14 @@ void __init setup_ioapic_dest(void) | |||
| 4104 | */ | 3904 | */ |
| 4105 | if (desc->status & | 3905 | if (desc->status & |
| 4106 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3906 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
| 4107 | mask = desc->affinity; | 3907 | mask = desc->irq_data.affinity; |
| 4108 | else | 3908 | else |
| 4109 | mask = apic->target_cpus(); | 3909 | mask = apic->target_cpus(); |
| 4110 | 3910 | ||
| 4111 | if (intr_remapping_enabled) | 3911 | if (intr_remapping_enabled) |
| 4112 | set_ir_ioapic_affinity_irq_desc(desc, mask); | 3912 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); |
| 4113 | else | 3913 | else |
| 4114 | set_ioapic_affinity_irq_desc(desc, mask); | 3914 | ioapic_set_affinity(&desc->irq_data, mask, false); |
| 4115 | } | 3915 | } |
| 4116 | 3916 | ||
| 4117 | } | 3917 | } |
| @@ -4295,19 +4095,18 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
| 4295 | void __init pre_init_apic_IRQ0(void) | 4095 | void __init pre_init_apic_IRQ0(void) |
| 4296 | { | 4096 | { |
| 4297 | struct irq_cfg *cfg; | 4097 | struct irq_cfg *cfg; |
| 4298 | struct irq_desc *desc; | ||
| 4299 | 4098 | ||
| 4300 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4099 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
| 4301 | #ifndef CONFIG_SMP | 4100 | #ifndef CONFIG_SMP |
| 4302 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 4101 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
| 4303 | #endif | 4102 | #endif |
| 4304 | desc = irq_to_desc_alloc_node(0, 0); | 4103 | /* Make sure the irq descriptor is set up */ |
| 4104 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
| 4305 | 4105 | ||
| 4306 | setup_local_APIC(); | 4106 | setup_local_APIC(); |
| 4307 | 4107 | ||
| 4308 | cfg = irq_cfg(0); | ||
| 4309 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4108 | add_pin_to_irq_node(cfg, 0, 0, 0); |
| 4310 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4109 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); |
| 4311 | 4110 | ||
| 4312 | setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); | 4111 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); |
| 4313 | } | 4112 | } |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a43f71cb30f..c90041ccb74 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
| @@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void) | |||
| 178 | error: | 178 | error: |
| 179 | if (nmi_watchdog == NMI_IO_APIC) { | 179 | if (nmi_watchdog == NMI_IO_APIC) { |
| 180 | if (!timer_through_8259) | 180 | if (!timer_through_8259) |
| 181 | legacy_pic->chip->mask(0); | 181 | legacy_pic->mask(0); |
| 182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); | 182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
| 183 | } | 183 | } |
| 184 | 184 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 39aaee5c1ab..80c482382d5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 131 | u32 low = 0, high = 0, address = 0; | 131 | u32 low = 0, high = 0, address = 0; |
| 132 | unsigned int bank, block; | 132 | unsigned int bank, block; |
| 133 | struct thresh_restart tr; | 133 | struct thresh_restart tr; |
| 134 | u8 lvt_off; | 134 | int lvt_off = -1; |
| 135 | u8 offset; | ||
| 135 | 136 | ||
| 136 | for (bank = 0; bank < NR_BANKS; ++bank) { | 137 | for (bank = 0; bank < NR_BANKS; ++bank) { |
| 137 | for (block = 0; block < NR_BLOCKS; ++block) { | 138 | for (block = 0; block < NR_BLOCKS; ++block) { |
| @@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 162 | if (shared_bank[bank] && c->cpu_core_id) | 163 | if (shared_bank[bank] && c->cpu_core_id) |
| 163 | break; | 164 | break; |
| 164 | #endif | 165 | #endif |
| 165 | lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, | 166 | offset = (high & MASK_LVTOFF_HI) >> 20; |
| 166 | APIC_EILVT_MSG_FIX, 0); | 167 | if (lvt_off < 0) { |
| 168 | if (setup_APIC_eilvt(offset, | ||
| 169 | THRESHOLD_APIC_VECTOR, | ||
| 170 | APIC_EILVT_MSG_FIX, 0)) { | ||
| 171 | pr_err(FW_BUG "cpu %d, failed to " | ||
| 172 | "setup threshold interrupt " | ||
| 173 | "for bank %d, block %d " | ||
| 174 | "(MSR%08X=0x%x%08x)", | ||
| 175 | smp_processor_id(), bank, block, | ||
| 176 | address, high, low); | ||
| 177 | continue; | ||
| 178 | } | ||
| 179 | lvt_off = offset; | ||
| 180 | } else if (lvt_off != offset) { | ||
| 181 | pr_err(FW_BUG "cpu %d, invalid threshold " | ||
| 182 | "interrupt offset %d for bank %d," | ||
| 183 | "block %d (MSR%08X=0x%x%08x)", | ||
| 184 | smp_processor_id(), lvt_off, bank, | ||
| 185 | block, address, high, low); | ||
| 186 | continue; | ||
| 187 | } | ||
| 167 | 188 | ||
| 168 | high &= ~MASK_LVTOFF_HI; | 189 | high &= ~MASK_LVTOFF_HI; |
| 169 | high |= lvt_off << 20; | 190 | high |= lvt_off << 20; |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7494999141b..efaf906daf9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, | |||
| 440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | 440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); |
| 441 | static struct hpet_dev *hpet_devs; | 441 | static struct hpet_dev *hpet_devs; |
| 442 | 442 | ||
| 443 | void hpet_msi_unmask(unsigned int irq) | 443 | void hpet_msi_unmask(struct irq_data *data) |
| 444 | { | 444 | { |
| 445 | struct hpet_dev *hdev = get_irq_data(irq); | 445 | struct hpet_dev *hdev = data->handler_data; |
| 446 | unsigned int cfg; | 446 | unsigned int cfg; |
| 447 | 447 | ||
| 448 | /* unmask it */ | 448 | /* unmask it */ |
| @@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) | |||
| 451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | void hpet_msi_mask(unsigned int irq) | 454 | void hpet_msi_mask(struct irq_data *data) |
| 455 | { | 455 | { |
| 456 | struct hpet_dev *hdev = data->handler_data; | ||
| 456 | unsigned int cfg; | 457 | unsigned int cfg; |
| 457 | struct hpet_dev *hdev = get_irq_data(irq); | ||
| 458 | 458 | ||
| 459 | /* mask it */ | 459 | /* mask it */ |
| 460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | 460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); |
| @@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) | |||
| 462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
| 463 | } | 463 | } |
| 464 | 464 | ||
| 465 | void hpet_msi_write(unsigned int irq, struct msi_msg *msg) | 465 | void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) |
| 466 | { | 466 | { |
| 467 | struct hpet_dev *hdev = get_irq_data(irq); | ||
| 468 | |||
| 469 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); | 467 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); |
| 470 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); | 468 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); |
| 471 | } | 469 | } |
| 472 | 470 | ||
| 473 | void hpet_msi_read(unsigned int irq, struct msi_msg *msg) | 471 | void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) |
| 474 | { | 472 | { |
| 475 | struct hpet_dev *hdev = get_irq_data(irq); | ||
| 476 | |||
| 477 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); | 473 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); |
| 478 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); | 474 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); |
| 479 | msg->address_hi = 0; | 475 | msg->address_hi = 0; |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index cafa7c80ac9..20757cb2efa 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
| @@ -29,24 +29,10 @@ | |||
| 29 | * plus some generic x86 specific things if generic specifics makes | 29 | * plus some generic x86 specific things if generic specifics makes |
| 30 | * any sense at all. | 30 | * any sense at all. |
| 31 | */ | 31 | */ |
| 32 | static void init_8259A(int auto_eoi); | ||
| 32 | 33 | ||
| 33 | static int i8259A_auto_eoi; | 34 | static int i8259A_auto_eoi; |
| 34 | DEFINE_RAW_SPINLOCK(i8259A_lock); | 35 | DEFINE_RAW_SPINLOCK(i8259A_lock); |
| 35 | static void mask_and_ack_8259A(unsigned int); | ||
| 36 | static void mask_8259A(void); | ||
| 37 | static void unmask_8259A(void); | ||
| 38 | static void disable_8259A_irq(unsigned int irq); | ||
| 39 | static void enable_8259A_irq(unsigned int irq); | ||
| 40 | static void init_8259A(int auto_eoi); | ||
| 41 | static int i8259A_irq_pending(unsigned int irq); | ||
| 42 | |||
| 43 | struct irq_chip i8259A_chip = { | ||
| 44 | .name = "XT-PIC", | ||
| 45 | .mask = disable_8259A_irq, | ||
| 46 | .disable = disable_8259A_irq, | ||
| 47 | .unmask = enable_8259A_irq, | ||
| 48 | .mask_ack = mask_and_ack_8259A, | ||
| 49 | }; | ||
| 50 | 36 | ||
| 51 | /* | 37 | /* |
| 52 | * 8259A PIC functions to handle ISA devices: | 38 | * 8259A PIC functions to handle ISA devices: |
| @@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; | |||
| 68 | */ | 54 | */ |
| 69 | unsigned long io_apic_irqs; | 55 | unsigned long io_apic_irqs; |
| 70 | 56 | ||
| 71 | static void disable_8259A_irq(unsigned int irq) | 57 | static void mask_8259A_irq(unsigned int irq) |
| 72 | { | 58 | { |
| 73 | unsigned int mask = 1 << irq; | 59 | unsigned int mask = 1 << irq; |
| 74 | unsigned long flags; | 60 | unsigned long flags; |
| @@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) | |||
| 82 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 68 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
| 83 | } | 69 | } |
| 84 | 70 | ||
| 85 | static void enable_8259A_irq(unsigned int irq) | 71 | static void disable_8259A_irq(struct irq_data *data) |
| 72 | { | ||
| 73 | mask_8259A_irq(data->irq); | ||
| 74 | } | ||
| 75 | |||
| 76 | static void unmask_8259A_irq(unsigned int irq) | ||
| 86 | { | 77 | { |
| 87 | unsigned int mask = ~(1 << irq); | 78 | unsigned int mask = ~(1 << irq); |
| 88 | unsigned long flags; | 79 | unsigned long flags; |
| @@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) | |||
| 96 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 87 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
| 97 | } | 88 | } |
| 98 | 89 | ||
| 90 | static void enable_8259A_irq(struct irq_data *data) | ||
| 91 | { | ||
| 92 | unmask_8259A_irq(data->irq); | ||
| 93 | } | ||
| 94 | |||
| 99 | static int i8259A_irq_pending(unsigned int irq) | 95 | static int i8259A_irq_pending(unsigned int irq) |
| 100 | { | 96 | { |
| 101 | unsigned int mask = 1<<irq; | 97 | unsigned int mask = 1<<irq; |
| @@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq) | |||
| 117 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
| 118 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
| 119 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
| 120 | "XT"); | 116 | i8259A_chip.name); |
| 121 | enable_irq(irq); | 117 | enable_irq(irq); |
| 122 | } | 118 | } |
| 123 | 119 | ||
| @@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq) | |||
| 150 | * first, _then_ send the EOI, and the order of EOI | 146 | * first, _then_ send the EOI, and the order of EOI |
| 151 | * to the two 8259s is important! | 147 | * to the two 8259s is important! |
| 152 | */ | 148 | */ |
| 153 | static void mask_and_ack_8259A(unsigned int irq) | 149 | static void mask_and_ack_8259A(struct irq_data *data) |
| 154 | { | 150 | { |
| 151 | unsigned int irq = data->irq; | ||
| 155 | unsigned int irqmask = 1 << irq; | 152 | unsigned int irqmask = 1 << irq; |
| 156 | unsigned long flags; | 153 | unsigned long flags; |
| 157 | 154 | ||
| @@ -223,6 +220,14 @@ spurious_8259A_irq: | |||
| 223 | } | 220 | } |
| 224 | } | 221 | } |
| 225 | 222 | ||
| 223 | struct irq_chip i8259A_chip = { | ||
| 224 | .name = "XT-PIC", | ||
| 225 | .irq_mask = disable_8259A_irq, | ||
| 226 | .irq_disable = disable_8259A_irq, | ||
| 227 | .irq_unmask = enable_8259A_irq, | ||
| 228 | .irq_mask_ack = mask_and_ack_8259A, | ||
| 229 | }; | ||
| 230 | |||
| 226 | static char irq_trigger[2]; | 231 | static char irq_trigger[2]; |
| 227 | /** | 232 | /** |
| 228 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ | 233 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ |
| @@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) | |||
| 342 | * In AEOI mode we just have to mask the interrupt | 347 | * In AEOI mode we just have to mask the interrupt |
| 343 | * when acking. | 348 | * when acking. |
| 344 | */ | 349 | */ |
| 345 | i8259A_chip.mask_ack = disable_8259A_irq; | 350 | i8259A_chip.irq_mask_ack = disable_8259A_irq; |
| 346 | else | 351 | else |
| 347 | i8259A_chip.mask_ack = mask_and_ack_8259A; | 352 | i8259A_chip.irq_mask_ack = mask_and_ack_8259A; |
| 348 | 353 | ||
| 349 | udelay(100); /* wait for 8259A to initialize */ | 354 | udelay(100); /* wait for 8259A to initialize */ |
| 350 | 355 | ||
| @@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) | |||
| 363 | static void legacy_pic_noop(void) { }; | 368 | static void legacy_pic_noop(void) { }; |
| 364 | static void legacy_pic_uint_noop(unsigned int unused) { }; | 369 | static void legacy_pic_uint_noop(unsigned int unused) { }; |
| 365 | static void legacy_pic_int_noop(int unused) { }; | 370 | static void legacy_pic_int_noop(int unused) { }; |
| 366 | |||
| 367 | static struct irq_chip dummy_pic_chip = { | ||
| 368 | .name = "dummy pic", | ||
| 369 | .mask = legacy_pic_uint_noop, | ||
| 370 | .unmask = legacy_pic_uint_noop, | ||
| 371 | .disable = legacy_pic_uint_noop, | ||
| 372 | .mask_ack = legacy_pic_uint_noop, | ||
| 373 | }; | ||
| 374 | static int legacy_pic_irq_pending_noop(unsigned int irq) | 371 | static int legacy_pic_irq_pending_noop(unsigned int irq) |
| 375 | { | 372 | { |
| 376 | return 0; | 373 | return 0; |
| @@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) | |||
| 378 | 375 | ||
| 379 | struct legacy_pic null_legacy_pic = { | 376 | struct legacy_pic null_legacy_pic = { |
| 380 | .nr_legacy_irqs = 0, | 377 | .nr_legacy_irqs = 0, |
| 381 | .chip = &dummy_pic_chip, | 378 | .chip = &dummy_irq_chip, |
| 379 | .mask = legacy_pic_uint_noop, | ||
| 380 | .unmask = legacy_pic_uint_noop, | ||
| 382 | .mask_all = legacy_pic_noop, | 381 | .mask_all = legacy_pic_noop, |
| 383 | .restore_mask = legacy_pic_noop, | 382 | .restore_mask = legacy_pic_noop, |
| 384 | .init = legacy_pic_int_noop, | 383 | .init = legacy_pic_int_noop, |
| @@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { | |||
| 389 | struct legacy_pic default_legacy_pic = { | 388 | struct legacy_pic default_legacy_pic = { |
| 390 | .nr_legacy_irqs = NR_IRQS_LEGACY, | 389 | .nr_legacy_irqs = NR_IRQS_LEGACY, |
| 391 | .chip = &i8259A_chip, | 390 | .chip = &i8259A_chip, |
| 392 | .mask_all = mask_8259A, | 391 | .mask = mask_8259A_irq, |
| 392 | .unmask = unmask_8259A_irq, | ||
| 393 | .mask_all = mask_8259A, | ||
| 393 | .restore_mask = unmask_8259A, | 394 | .restore_mask = unmask_8259A, |
| 394 | .init = init_8259A, | 395 | .init = init_8259A, |
| 395 | .irq_pending = i8259A_irq_pending, | 396 | .irq_pending = i8259A_irq_pending, |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 44edb03fc9e..83ec0175f98 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
| @@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 159 | seq_printf(p, "%*d: ", prec, i); | 159 | seq_printf(p, "%*d: ", prec, i); |
| 160 | for_each_online_cpu(j) | 160 | for_each_online_cpu(j) |
| 161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 162 | seq_printf(p, " %8s", desc->chip->name); | 162 | seq_printf(p, " %8s", desc->irq_data.chip->name); |
| 163 | seq_printf(p, "-%-8s", desc->name); | 163 | seq_printf(p, "-%-8s", desc->name); |
| 164 | 164 | ||
| 165 | if (action) { | 165 | if (action) { |
| @@ -282,6 +282,7 @@ void fixup_irqs(void) | |||
| 282 | unsigned int irq, vector; | 282 | unsigned int irq, vector; |
| 283 | static int warned; | 283 | static int warned; |
| 284 | struct irq_desc *desc; | 284 | struct irq_desc *desc; |
| 285 | struct irq_data *data; | ||
| 285 | 286 | ||
| 286 | for_each_irq_desc(irq, desc) { | 287 | for_each_irq_desc(irq, desc) { |
| 287 | int break_affinity = 0; | 288 | int break_affinity = 0; |
| @@ -296,7 +297,8 @@ void fixup_irqs(void) | |||
| 296 | /* interrupt's are disabled at this point */ | 297 | /* interrupt's are disabled at this point */ |
| 297 | raw_spin_lock(&desc->lock); | 298 | raw_spin_lock(&desc->lock); |
| 298 | 299 | ||
| 299 | affinity = desc->affinity; | 300 | data = &desc->irq_data; |
| 301 | affinity = data->affinity; | ||
| 300 | if (!irq_has_action(irq) || | 302 | if (!irq_has_action(irq) || |
| 301 | cpumask_equal(affinity, cpu_online_mask)) { | 303 | cpumask_equal(affinity, cpu_online_mask)) { |
| 302 | raw_spin_unlock(&desc->lock); | 304 | raw_spin_unlock(&desc->lock); |
| @@ -315,16 +317,16 @@ void fixup_irqs(void) | |||
| 315 | affinity = cpu_all_mask; | 317 | affinity = cpu_all_mask; |
| 316 | } | 318 | } |
| 317 | 319 | ||
| 318 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) | 320 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) |
| 319 | desc->chip->mask(irq); | 321 | data->chip->irq_mask(data); |
| 320 | 322 | ||
| 321 | if (desc->chip->set_affinity) | 323 | if (data->chip->irq_set_affinity) |
| 322 | desc->chip->set_affinity(irq, affinity); | 324 | data->chip->irq_set_affinity(data, affinity, true); |
| 323 | else if (!(warned++)) | 325 | else if (!(warned++)) |
| 324 | set_affinity = 0; | 326 | set_affinity = 0; |
| 325 | 327 | ||
| 326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | 328 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) |
| 327 | desc->chip->unmask(irq); | 329 | data->chip->irq_unmask(data); |
| 328 | 330 | ||
| 329 | raw_spin_unlock(&desc->lock); | 331 | raw_spin_unlock(&desc->lock); |
| 330 | 332 | ||
| @@ -355,10 +357,10 @@ void fixup_irqs(void) | |||
| 355 | if (irr & (1 << (vector % 32))) { | 357 | if (irr & (1 << (vector % 32))) { |
| 356 | irq = __get_cpu_var(vector_irq)[vector]; | 358 | irq = __get_cpu_var(vector_irq)[vector]; |
| 357 | 359 | ||
| 358 | desc = irq_to_desc(irq); | 360 | data = irq_get_irq_data(irq); |
| 359 | raw_spin_lock(&desc->lock); | 361 | raw_spin_lock(&desc->lock); |
| 360 | if (desc->chip->retrigger) | 362 | if (data->chip->irq_retrigger) |
| 361 | desc->chip->retrigger(irq); | 363 | data->chip->irq_retrigger(data); |
| 362 | raw_spin_unlock(&desc->lock); | 364 | raw_spin_unlock(&desc->lock); |
| 363 | } | 365 | } |
| 364 | } | 366 | } |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 713969b9266..c752e973958 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
| @@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
| 100 | 100 | ||
| 101 | void __init init_ISA_irqs(void) | 101 | void __init init_ISA_irqs(void) |
| 102 | { | 102 | { |
| 103 | struct irq_chip *chip = legacy_pic->chip; | ||
| 104 | const char *name = chip->name; | ||
| 103 | int i; | 105 | int i; |
| 104 | 106 | ||
| 105 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 107 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
| @@ -107,19 +109,8 @@ void __init init_ISA_irqs(void) | |||
| 107 | #endif | 109 | #endif |
| 108 | legacy_pic->init(0); | 110 | legacy_pic->init(0); |
| 109 | 111 | ||
| 110 | /* | 112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
| 111 | * 16 old-style INTA-cycle interrupts: | 113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); |
| 112 | */ | ||
| 113 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { | ||
| 114 | struct irq_desc *desc = irq_to_desc(i); | ||
| 115 | |||
| 116 | desc->status = IRQ_DISABLED; | ||
| 117 | desc->action = NULL; | ||
| 118 | desc->depth = 1; | ||
| 119 | |||
| 120 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
| 121 | handle_level_irq, "XT"); | ||
| 122 | } | ||
| 123 | } | 114 | } |
| 124 | 115 | ||
| 125 | void __init init_IRQ(void) | 116 | void __init init_IRQ(void) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2ced73ba048..dfb50890b5b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -323,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
| 323 | check_tsc_sync_target(); | 323 | check_tsc_sync_target(); |
| 324 | 324 | ||
| 325 | if (nmi_watchdog == NMI_IO_APIC) { | 325 | if (nmi_watchdog == NMI_IO_APIC) { |
| 326 | legacy_pic->chip->mask(0); | 326 | legacy_pic->mask(0); |
| 327 | enable_NMI_through_LVT0(); | 327 | enable_NMI_through_LVT0(); |
| 328 | legacy_pic->chip->unmask(0); | 328 | legacy_pic->unmask(0); |
| 329 | } | 329 | } |
| 330 | 330 | ||
| 331 | /* This must be done before setting cpu_online_mask */ | 331 | /* This must be done before setting cpu_online_mask */ |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 1132129db79..7b24460917d 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
| @@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ | |||
| 28 | static spinlock_t uv_irq_lock; | 28 | static spinlock_t uv_irq_lock; |
| 29 | static struct rb_root uv_irq_root; | 29 | static struct rb_root uv_irq_root; |
| 30 | 30 | ||
| 31 | static int uv_set_irq_affinity(unsigned int, const struct cpumask *); | 31 | static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); |
| 32 | 32 | ||
| 33 | static void uv_noop(unsigned int irq) | 33 | static void uv_noop(struct irq_data *data) { } |
| 34 | { | ||
| 35 | } | ||
| 36 | |||
| 37 | static unsigned int uv_noop_ret(unsigned int irq) | ||
| 38 | { | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | 34 | ||
| 42 | static void uv_ack_apic(unsigned int irq) | 35 | static void uv_ack_apic(struct irq_data *data) |
| 43 | { | 36 | { |
| 44 | ack_APIC_irq(); | 37 | ack_APIC_irq(); |
| 45 | } | 38 | } |
| 46 | 39 | ||
| 47 | static struct irq_chip uv_irq_chip = { | 40 | static struct irq_chip uv_irq_chip = { |
| 48 | .name = "UV-CORE", | 41 | .name = "UV-CORE", |
| 49 | .startup = uv_noop_ret, | 42 | .irq_mask = uv_noop, |
| 50 | .shutdown = uv_noop, | 43 | .irq_unmask = uv_noop, |
| 51 | .enable = uv_noop, | 44 | .irq_eoi = uv_ack_apic, |
| 52 | .disable = uv_noop, | 45 | .irq_set_affinity = uv_set_irq_affinity, |
| 53 | .ack = uv_noop, | ||
| 54 | .mask = uv_noop, | ||
| 55 | .unmask = uv_noop, | ||
| 56 | .eoi = uv_ack_apic, | ||
| 57 | .end = uv_noop, | ||
| 58 | .set_affinity = uv_set_irq_affinity, | ||
| 59 | }; | 46 | }; |
| 60 | 47 | ||
| 61 | /* | 48 | /* |
| @@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
| 144 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
| 145 | { | 132 | { |
| 146 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
| 147 | struct irq_desc *desc = irq_to_desc(irq); | 134 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
| 148 | struct irq_cfg *cfg; | ||
| 149 | int mmr_pnode; | ||
| 150 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
| 151 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
| 152 | int err; | 137 | int mmr_pnode, err; |
| 153 | 138 | ||
| 154 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | 139 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != |
| 155 | sizeof(unsigned long)); | 140 | sizeof(unsigned long)); |
| 156 | 141 | ||
| 157 | cfg = irq_cfg(irq); | ||
| 158 | |||
| 159 | err = assign_irq_vector(irq, cfg, eligible_cpu); | 142 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
| 160 | if (err != 0) | 143 | if (err != 0) |
| 161 | return err; | 144 | return err; |
| 162 | 145 | ||
| 163 | if (limit == UV_AFFINITY_CPU) | 146 | if (limit == UV_AFFINITY_CPU) |
| 164 | desc->status |= IRQ_NO_BALANCING; | 147 | irq_set_status_flags(irq, IRQ_NO_BALANCING); |
| 165 | else | 148 | else |
| 166 | desc->status |= IRQ_MOVE_PCNTXT; | 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 167 | 150 | ||
| 168 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | 151 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, |
| 169 | irq_name); | 152 | irq_name); |
| @@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) | |||
| 206 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 189 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
| 207 | } | 190 | } |
| 208 | 191 | ||
| 209 | static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | 192 | static int |
| 193 | uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, | ||
| 194 | bool force) | ||
| 210 | { | 195 | { |
| 211 | struct irq_desc *desc = irq_to_desc(irq); | 196 | struct irq_cfg *cfg = data->chip_data; |
| 212 | struct irq_cfg *cfg = desc->chip_data; | ||
| 213 | unsigned int dest; | 197 | unsigned int dest; |
| 214 | unsigned long mmr_value; | 198 | unsigned long mmr_value, mmr_offset; |
| 215 | struct uv_IO_APIC_route_entry *entry; | 199 | struct uv_IO_APIC_route_entry *entry; |
| 216 | unsigned long mmr_offset; | ||
| 217 | int mmr_pnode; | 200 | int mmr_pnode; |
| 218 | 201 | ||
| 219 | if (set_desc_affinity(desc, mask, &dest)) | 202 | if (__ioapic_set_affinity(data, mask, &dest)) |
| 220 | return -1; | 203 | return -1; |
| 221 | 204 | ||
| 222 | mmr_value = 0; | 205 | mmr_value = 0; |
| @@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 231 | entry->dest = dest; | 214 | entry->dest = dest; |
| 232 | 215 | ||
| 233 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ | 216 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ |
| 234 | if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) | 217 | if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) |
| 235 | return -1; | 218 | return -1; |
| 236 | 219 | ||
| 237 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 220 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index e680ea52db9..3371bd053b8 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
| @@ -66,10 +66,7 @@ static void __init visws_time_init(void) | |||
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | /* Replaces the default init_ISA_irqs in the generic setup */ | 68 | /* Replaces the default init_ISA_irqs in the generic setup */ |
| 69 | static void __init visws_pre_intr_init(void) | 69 | static void __init visws_pre_intr_init(void); |
| 70 | { | ||
| 71 | init_VISWS_APIC_irqs(); | ||
| 72 | } | ||
| 73 | 70 | ||
| 74 | /* Quirk for machine specific memory setup. */ | 71 | /* Quirk for machine specific memory setup. */ |
| 75 | 72 | ||
| @@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq) | |||
| 429 | /* | 426 | /* |
| 430 | * This is the SGI Cobalt (IO-)APIC: | 427 | * This is the SGI Cobalt (IO-)APIC: |
| 431 | */ | 428 | */ |
| 432 | 429 | static void enable_cobalt_irq(struct irq_data *data) | |
| 433 | static void enable_cobalt_irq(unsigned int irq) | ||
| 434 | { | 430 | { |
| 435 | co_apic_set(is_co_apic(irq), irq); | 431 | co_apic_set(is_co_apic(data->irq), data->irq); |
| 436 | } | 432 | } |
| 437 | 433 | ||
| 438 | static void disable_cobalt_irq(unsigned int irq) | 434 | static void disable_cobalt_irq(struct irq_data *data) |
| 439 | { | 435 | { |
| 440 | int entry = is_co_apic(irq); | 436 | int entry = is_co_apic(data->irq); |
| 441 | 437 | ||
| 442 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); | 438 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); |
| 443 | co_apic_read(CO_APIC_LO(entry)); | 439 | co_apic_read(CO_APIC_LO(entry)); |
| 444 | } | 440 | } |
| 445 | 441 | ||
| 446 | /* | 442 | static void ack_cobalt_irq(struct irq_data *data) |
| 447 | * "irq" really just serves to identify the device. Here is where we | ||
| 448 | * map this to the Cobalt APIC entry where it's physically wired. | ||
| 449 | * This is called via request_irq -> setup_irq -> irq_desc->startup() | ||
| 450 | */ | ||
| 451 | static unsigned int startup_cobalt_irq(unsigned int irq) | ||
| 452 | { | 443 | { |
| 453 | unsigned long flags; | 444 | unsigned long flags; |
| 454 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 455 | 445 | ||
| 456 | spin_lock_irqsave(&cobalt_lock, flags); | 446 | spin_lock_irqsave(&cobalt_lock, flags); |
| 457 | if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) | 447 | disable_cobalt_irq(data); |
| 458 | desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); | ||
| 459 | enable_cobalt_irq(irq); | ||
| 460 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
| 461 | return 0; | ||
| 462 | } | ||
| 463 | |||
| 464 | static void ack_cobalt_irq(unsigned int irq) | ||
| 465 | { | ||
| 466 | unsigned long flags; | ||
| 467 | |||
| 468 | spin_lock_irqsave(&cobalt_lock, flags); | ||
| 469 | disable_cobalt_irq(irq); | ||
| 470 | apic_write(APIC_EOI, APIC_EIO_ACK); | 448 | apic_write(APIC_EOI, APIC_EIO_ACK); |
| 471 | spin_unlock_irqrestore(&cobalt_lock, flags); | 449 | spin_unlock_irqrestore(&cobalt_lock, flags); |
| 472 | } | 450 | } |
| 473 | 451 | ||
| 474 | static void end_cobalt_irq(unsigned int irq) | ||
| 475 | { | ||
| 476 | unsigned long flags; | ||
| 477 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 478 | |||
| 479 | spin_lock_irqsave(&cobalt_lock, flags); | ||
| 480 | if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
| 481 | enable_cobalt_irq(irq); | ||
| 482 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
| 483 | } | ||
| 484 | |||
| 485 | static struct irq_chip cobalt_irq_type = { | 452 | static struct irq_chip cobalt_irq_type = { |
| 486 | .name = "Cobalt-APIC", | 453 | .name = "Cobalt-APIC", |
| 487 | .startup = startup_cobalt_irq, | 454 | .irq_enable = enable_cobalt_irq, |
| 488 | .shutdown = disable_cobalt_irq, | 455 | .irq_disable = disable_cobalt_irq, |
| 489 | .enable = enable_cobalt_irq, | 456 | .irq_ack = ack_cobalt_irq, |
| 490 | .disable = disable_cobalt_irq, | ||
| 491 | .ack = ack_cobalt_irq, | ||
| 492 | .end = end_cobalt_irq, | ||
| 493 | }; | 457 | }; |
| 494 | 458 | ||
| 495 | 459 | ||
| @@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = { | |||
| 503 | * interrupt controller type, and through a special virtual interrupt- | 467 | * interrupt controller type, and through a special virtual interrupt- |
| 504 | * controller. Device drivers only see the virtual interrupt sources. | 468 | * controller. Device drivers only see the virtual interrupt sources. |
| 505 | */ | 469 | */ |
| 506 | static unsigned int startup_piix4_master_irq(unsigned int irq) | 470 | static unsigned int startup_piix4_master_irq(struct irq_data *data) |
| 507 | { | 471 | { |
| 508 | legacy_pic->init(0); | 472 | legacy_pic->init(0); |
| 509 | 473 | enable_cobalt_irq(data); | |
| 510 | return startup_cobalt_irq(irq); | ||
| 511 | } | 474 | } |
| 512 | 475 | ||
| 513 | static void end_piix4_master_irq(unsigned int irq) | 476 | static void end_piix4_master_irq(struct irq_data *data) |
| 514 | { | 477 | { |
| 515 | unsigned long flags; | 478 | unsigned long flags; |
| 516 | 479 | ||
| 517 | spin_lock_irqsave(&cobalt_lock, flags); | 480 | spin_lock_irqsave(&cobalt_lock, flags); |
| 518 | enable_cobalt_irq(irq); | 481 | enable_cobalt_irq(data); |
| 519 | spin_unlock_irqrestore(&cobalt_lock, flags); | 482 | spin_unlock_irqrestore(&cobalt_lock, flags); |
| 520 | } | 483 | } |
| 521 | 484 | ||
| 522 | static struct irq_chip piix4_master_irq_type = { | 485 | static struct irq_chip piix4_master_irq_type = { |
| 523 | .name = "PIIX4-master", | 486 | .name = "PIIX4-master", |
| 524 | .startup = startup_piix4_master_irq, | 487 | .irq_startup = startup_piix4_master_irq, |
| 525 | .ack = ack_cobalt_irq, | 488 | .irq_ack = ack_cobalt_irq, |
| 526 | .end = end_piix4_master_irq, | ||
| 527 | }; | 489 | }; |
| 528 | 490 | ||
| 491 | static void pii4_mask(struct irq_data *data) { } | ||
| 529 | 492 | ||
| 530 | static struct irq_chip piix4_virtual_irq_type = { | 493 | static struct irq_chip piix4_virtual_irq_type = { |
| 531 | .name = "PIIX4-virtual", | 494 | .name = "PIIX4-virtual", |
| 495 | .mask = pii4_mask, | ||
| 532 | }; | 496 | }; |
| 533 | 497 | ||
| 534 | |||
| 535 | /* | 498 | /* |
| 536 | * PIIX4-8259 master/virtual functions to handle interrupt requests | 499 | * PIIX4-8259 master/virtual functions to handle interrupt requests |
| 537 | * from legacy devices: floppy, parallel, serial, rtc. | 500 | * from legacy devices: floppy, parallel, serial, rtc. |
| @@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = { | |||
| 549 | */ | 512 | */ |
| 550 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) | 513 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) |
| 551 | { | 514 | { |
| 552 | int realirq; | ||
| 553 | struct irq_desc *desc; | ||
| 554 | unsigned long flags; | 515 | unsigned long flags; |
| 516 | int realirq; | ||
| 555 | 517 | ||
| 556 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 518 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
| 557 | 519 | ||
| @@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) | |||
| 592 | 554 | ||
| 593 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 555 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
| 594 | 556 | ||
| 595 | desc = irq_to_desc(realirq); | ||
| 596 | |||
| 597 | /* | 557 | /* |
| 598 | * handle this 'virtual interrupt' as a Cobalt one now. | 558 | * handle this 'virtual interrupt' as a Cobalt one now. |
| 599 | */ | 559 | */ |
| 600 | kstat_incr_irqs_this_cpu(realirq, desc); | 560 | generic_handle_irq(realirq); |
| 601 | |||
| 602 | if (likely(desc->action != NULL)) | ||
| 603 | handle_IRQ_event(realirq, desc->action); | ||
| 604 | |||
| 605 | if (!(desc->status & IRQ_DISABLED)) | ||
| 606 | legacy_pic->chip->unmask(realirq); | ||
| 607 | 561 | ||
| 608 | return IRQ_HANDLED; | 562 | return IRQ_HANDLED; |
| 609 | 563 | ||
| @@ -624,41 +578,35 @@ static struct irqaction cascade_action = { | |||
| 624 | 578 | ||
| 625 | static inline void set_piix4_virtual_irq_type(void) | 579 | static inline void set_piix4_virtual_irq_type(void) |
| 626 | { | 580 | { |
| 627 | piix4_virtual_irq_type.shutdown = i8259A_chip.mask; | ||
| 628 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; | 581 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; |
| 629 | piix4_virtual_irq_type.disable = i8259A_chip.mask; | 582 | piix4_virtual_irq_type.disable = i8259A_chip.mask; |
| 583 | piix4_virtual_irq_type.unmask = i8259A_chip.unmask; | ||
| 630 | } | 584 | } |
| 631 | 585 | ||
| 632 | void init_VISWS_APIC_irqs(void) | 586 | static void __init visws_pre_intr_init(void) |
| 633 | { | 587 | { |
| 634 | int i; | 588 | int i; |
| 635 | 589 | ||
| 636 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { | 590 | set_piix4_virtual_irq_type(); |
| 637 | struct irq_desc *desc = irq_to_desc(i); | ||
| 638 | |||
| 639 | desc->status = IRQ_DISABLED; | ||
| 640 | desc->action = 0; | ||
| 641 | desc->depth = 1; | ||
| 642 | 591 | ||
| 643 | if (i == 0) { | 592 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { |
| 644 | desc->chip = &cobalt_irq_type; | 593 | struct irq_chip *chip = NULL; |
| 645 | } | 594 | |
| 646 | else if (i == CO_IRQ_IDE0) { | 595 | if (i == 0) |
| 647 | desc->chip = &cobalt_irq_type; | 596 | chip = &cobalt_irq_type; |
| 648 | } | 597 | else if (i == CO_IRQ_IDE0) |
| 649 | else if (i == CO_IRQ_IDE1) { | 598 | chip = &cobalt_irq_type; |
| 650 | desc->chip = &cobalt_irq_type; | 599 | else if (i == CO_IRQ_IDE1) |
| 651 | } | 600 | chip = &cobalt_irq_type; |
| 652 | else if (i == CO_IRQ_8259) { | 601 | else if (i == CO_IRQ_8259) |
| 653 | desc->chip = &piix4_master_irq_type; | 602 | chip = &piix4_master_irq_type; |
| 654 | } | 603 | else if (i < CO_IRQ_APIC0) |
| 655 | else if (i < CO_IRQ_APIC0) { | 604 | chip = &piix4_virtual_irq_type; |
| 656 | set_piix4_virtual_irq_type(); | 605 | else if (IS_CO_APIC(i)) |
| 657 | desc->chip = &piix4_virtual_irq_type; | 606 | chip = &cobalt_irq_type; |
| 658 | } | 607 | |
| 659 | else if (IS_CO_APIC(i)) { | 608 | if (chip) |
| 660 | desc->chip = &cobalt_irq_type; | 609 | set_irq_chip(i, chip); |
| 661 | } | ||
| 662 | } | 610 | } |
| 663 | 611 | ||
| 664 | setup_irq(CO_IRQ_8259, &master_action); | 612 | setup_irq(CO_IRQ_8259, &master_action); |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9d5f5584845..73b1e1a1f48 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
| @@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void) | |||
| 791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we | 791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we |
| 792 | * just mask and unmask them. I wonder if we should be cleverer? | 792 | * just mask and unmask them. I wonder if we should be cleverer? |
| 793 | */ | 793 | */ |
| 794 | static void disable_lguest_irq(unsigned int irq) | 794 | static void disable_lguest_irq(struct irq_data *data) |
| 795 | { | 795 | { |
| 796 | set_bit(irq, lguest_data.blocked_interrupts); | 796 | set_bit(data->irq, lguest_data.blocked_interrupts); |
| 797 | } | 797 | } |
| 798 | 798 | ||
| 799 | static void enable_lguest_irq(unsigned int irq) | 799 | static void enable_lguest_irq(struct irq_data *data) |
| 800 | { | 800 | { |
| 801 | clear_bit(irq, lguest_data.blocked_interrupts); | 801 | clear_bit(data->irq, lguest_data.blocked_interrupts); |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | /* This structure describes the lguest IRQ controller. */ | 804 | /* This structure describes the lguest IRQ controller. */ |
| 805 | static struct irq_chip lguest_irq_controller = { | 805 | static struct irq_chip lguest_irq_controller = { |
| 806 | .name = "lguest", | 806 | .name = "lguest", |
| 807 | .mask = disable_lguest_irq, | 807 | .irq_mask = disable_lguest_irq, |
| 808 | .mask_ack = disable_lguest_irq, | 808 | .irq_mask_ack = disable_lguest_irq, |
| 809 | .unmask = enable_lguest_irq, | 809 | .irq_unmask = enable_lguest_irq, |
| 810 | }; | 810 | }; |
| 811 | 811 | ||
| 812 | /* | 812 | /* |
| @@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void) | |||
| 838 | * rather than set them in lguest_init_IRQ we are called here every time an | 838 | * rather than set them in lguest_init_IRQ we are called here every time an |
| 839 | * lguest device needs an interrupt. | 839 | * lguest device needs an interrupt. |
| 840 | * | 840 | * |
| 841 | * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should | 841 | * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should |
| 842 | * pass that up! | 842 | * pass that up! |
| 843 | */ | 843 | */ |
| 844 | void lguest_setup_irq(unsigned int irq) | 844 | void lguest_setup_irq(unsigned int irq) |
| 845 | { | 845 | { |
| 846 | irq_to_desc_alloc_node(irq, 0); | 846 | irq_alloc_desc_at(irq, 0); |
| 847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, |
| 848 | handle_level_irq, "level"); | 848 | handle_level_irq, "level"); |
| 849 | } | 849 | } |
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5aa8d..42fb46f8388 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
| @@ -64,15 +64,22 @@ static u64 ibs_op_ctl; | |||
| 64 | * IBS cpuid feature detection | 64 | * IBS cpuid feature detection |
| 65 | */ | 65 | */ |
| 66 | 66 | ||
| 67 | #define IBS_CPUID_FEATURES 0x8000001b | 67 | #define IBS_CPUID_FEATURES 0x8000001b |
| 68 | 68 | ||
| 69 | /* | 69 | /* |
| 70 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but | 70 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but |
| 71 | * bit 0 is used to indicate the existence of IBS. | 71 | * bit 0 is used to indicate the existence of IBS. |
| 72 | */ | 72 | */ |
| 73 | #define IBS_CAPS_AVAIL (1LL<<0) | 73 | #define IBS_CAPS_AVAIL (1U<<0) |
| 74 | #define IBS_CAPS_RDWROPCNT (1LL<<3) | 74 | #define IBS_CAPS_RDWROPCNT (1U<<3) |
| 75 | #define IBS_CAPS_OPCNT (1LL<<4) | 75 | #define IBS_CAPS_OPCNT (1U<<4) |
| 76 | |||
| 77 | /* | ||
| 78 | * IBS APIC setup | ||
| 79 | */ | ||
| 80 | #define IBSCTL 0x1cc | ||
| 81 | #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) | ||
| 82 | #define IBSCTL_LVT_OFFSET_MASK 0x0F | ||
| 76 | 83 | ||
| 77 | /* | 84 | /* |
| 78 | * IBS randomization macros | 85 | * IBS randomization macros |
| @@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void) | |||
| 266 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | 273 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); |
| 267 | } | 274 | } |
| 268 | 275 | ||
| 276 | static inline int eilvt_is_available(int offset) | ||
| 277 | { | ||
| 278 | /* check if we may assign a vector */ | ||
| 279 | return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); | ||
| 280 | } | ||
| 281 | |||
| 282 | static inline int ibs_eilvt_valid(void) | ||
| 283 | { | ||
| 284 | u64 val; | ||
| 285 | int offset; | ||
| 286 | |||
| 287 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
| 288 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) { | ||
| 289 | pr_err(FW_BUG "cpu %d, invalid IBS " | ||
| 290 | "interrupt offset %d (MSR%08X=0x%016llx)", | ||
| 291 | smp_processor_id(), offset, | ||
| 292 | MSR_AMD64_IBSCTL, val); | ||
| 293 | return 0; | ||
| 294 | } | ||
| 295 | |||
| 296 | offset = val & IBSCTL_LVT_OFFSET_MASK; | ||
| 297 | |||
| 298 | if (eilvt_is_available(offset)) | ||
| 299 | return !0; | ||
| 300 | |||
| 301 | pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " | ||
| 302 | "not available (MSR%08X=0x%016llx)", | ||
| 303 | smp_processor_id(), offset, | ||
| 304 | MSR_AMD64_IBSCTL, val); | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | static inline int get_ibs_offset(void) | ||
| 310 | { | ||
| 311 | u64 val; | ||
| 312 | |||
| 313 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
| 314 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) | ||
| 315 | return -EINVAL; | ||
| 316 | |||
| 317 | return val & IBSCTL_LVT_OFFSET_MASK; | ||
| 318 | } | ||
| 319 | |||
| 320 | static void setup_APIC_ibs(void) | ||
| 321 | { | ||
| 322 | int offset; | ||
| 323 | |||
| 324 | offset = get_ibs_offset(); | ||
| 325 | if (offset < 0) | ||
| 326 | goto failed; | ||
| 327 | |||
| 328 | if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) | ||
| 329 | return; | ||
| 330 | failed: | ||
| 331 | pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", | ||
| 332 | smp_processor_id()); | ||
| 333 | } | ||
| 334 | |||
| 335 | static void clear_APIC_ibs(void) | ||
| 336 | { | ||
| 337 | int offset; | ||
| 338 | |||
| 339 | offset = get_ibs_offset(); | ||
| 340 | if (offset >= 0) | ||
| 341 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | ||
| 342 | } | ||
| 343 | |||
| 269 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 344 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
| 270 | 345 | ||
| 271 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | 346 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, |
| @@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | |||
| 376 | } | 451 | } |
| 377 | 452 | ||
| 378 | if (ibs_caps) | 453 | if (ibs_caps) |
| 379 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); | 454 | setup_APIC_ibs(); |
| 380 | } | 455 | } |
| 381 | 456 | ||
| 382 | static void op_amd_cpu_shutdown(void) | 457 | static void op_amd_cpu_shutdown(void) |
| 383 | { | 458 | { |
| 384 | if (ibs_caps) | 459 | if (ibs_caps) |
| 385 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | 460 | clear_APIC_ibs(); |
| 386 | } | 461 | } |
| 387 | 462 | ||
| 388 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 463 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
| @@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
| 445 | op_amd_stop_ibs(); | 520 | op_amd_stop_ibs(); |
| 446 | } | 521 | } |
| 447 | 522 | ||
| 448 | static int __init_ibs_nmi(void) | 523 | static int setup_ibs_ctl(int ibs_eilvt_off) |
| 449 | { | 524 | { |
| 450 | #define IBSCTL_LVTOFFSETVAL (1 << 8) | ||
| 451 | #define IBSCTL 0x1cc | ||
| 452 | struct pci_dev *cpu_cfg; | 525 | struct pci_dev *cpu_cfg; |
| 453 | int nodes; | 526 | int nodes; |
| 454 | u32 value = 0; | 527 | u32 value = 0; |
| 455 | u8 ibs_eilvt_off; | ||
| 456 | |||
| 457 | ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | ||
| 458 | 528 | ||
| 459 | nodes = 0; | 529 | nodes = 0; |
| 460 | cpu_cfg = NULL; | 530 | cpu_cfg = NULL; |
| @@ -466,21 +536,60 @@ static int __init_ibs_nmi(void) | |||
| 466 | break; | 536 | break; |
| 467 | ++nodes; | 537 | ++nodes; |
| 468 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off | 538 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off |
| 469 | | IBSCTL_LVTOFFSETVAL); | 539 | | IBSCTL_LVT_OFFSET_VALID); |
| 470 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 540 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
| 471 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 541 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { |
| 472 | pci_dev_put(cpu_cfg); | 542 | pci_dev_put(cpu_cfg); |
| 473 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 543 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
| 474 | "IBSCTL = 0x%08x", value); | 544 | "IBSCTL = 0x%08x\n", value); |
| 475 | return 1; | 545 | return -EINVAL; |
| 476 | } | 546 | } |
| 477 | } while (1); | 547 | } while (1); |
| 478 | 548 | ||
| 479 | if (!nodes) { | 549 | if (!nodes) { |
| 480 | printk(KERN_DEBUG "No CPU node configured for IBS"); | 550 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); |
| 481 | return 1; | 551 | return -ENODEV; |
| 552 | } | ||
| 553 | |||
| 554 | return 0; | ||
| 555 | } | ||
| 556 | |||
| 557 | static int force_ibs_eilvt_setup(void) | ||
| 558 | { | ||
| 559 | int i; | ||
| 560 | int ret; | ||
| 561 | |||
| 562 | /* find the next free available EILVT entry */ | ||
| 563 | for (i = 1; i < 4; i++) { | ||
| 564 | if (!eilvt_is_available(i)) | ||
| 565 | continue; | ||
| 566 | ret = setup_ibs_ctl(i); | ||
| 567 | if (ret) | ||
| 568 | return ret; | ||
| 569 | return 0; | ||
| 482 | } | 570 | } |
| 483 | 571 | ||
| 572 | printk(KERN_DEBUG "No EILVT entry available\n"); | ||
| 573 | |||
| 574 | return -EBUSY; | ||
| 575 | } | ||
| 576 | |||
| 577 | static int __init_ibs_nmi(void) | ||
| 578 | { | ||
| 579 | int ret; | ||
| 580 | |||
| 581 | if (ibs_eilvt_valid()) | ||
| 582 | return 0; | ||
| 583 | |||
| 584 | ret = force_ibs_eilvt_setup(); | ||
| 585 | if (ret) | ||
| 586 | return ret; | ||
| 587 | |||
| 588 | if (!ibs_eilvt_valid()) | ||
| 589 | return -EFAULT; | ||
| 590 | |||
| 591 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
| 592 | |||
| 484 | return 0; | 593 | return 0; |
| 485 | } | 594 | } |
| 486 | 595 | ||
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c64a5d387de..87508886cbb 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
| @@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 92 | for_each_online_cpu(j) | 92 | for_each_online_cpu(j) |
| 93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 94 | #endif | 94 | #endif |
| 95 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 95 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
| 96 | seq_printf(p, " %s", action->name); | 96 | seq_printf(p, " %s", action->name); |
| 97 | 97 | ||
| 98 | for (action=action->next; action; action = action->next) | 98 | for (action=action->next; action; action = action->next) |
diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h index d4c50512a1f..88c9423500d 100644 --- a/drivers/isdn/act2000/act2000.h +++ b/drivers/isdn/act2000/act2000.h | |||
| @@ -141,9 +141,9 @@ typedef struct irq_data_isa { | |||
| 141 | __u8 rcvhdr[8]; | 141 | __u8 rcvhdr[8]; |
| 142 | } irq_data_isa; | 142 | } irq_data_isa; |
| 143 | 143 | ||
| 144 | typedef union irq_data { | 144 | typedef union act2000_irq_data { |
| 145 | irq_data_isa isa; | 145 | irq_data_isa isa; |
| 146 | } irq_data; | 146 | } act2000_irq_data; |
| 147 | 147 | ||
| 148 | /* | 148 | /* |
| 149 | * Per card driver data | 149 | * Per card driver data |
| @@ -176,7 +176,7 @@ typedef struct act2000_card { | |||
| 176 | char *status_buf_read; | 176 | char *status_buf_read; |
| 177 | char *status_buf_write; | 177 | char *status_buf_write; |
| 178 | char *status_buf_end; | 178 | char *status_buf_end; |
| 179 | irq_data idat; /* Data used for IRQ handler */ | 179 | act2000_irq_data idat; /* Data used for IRQ handler */ |
| 180 | isdn_if interface; /* Interface to upper layer */ | 180 | isdn_if interface; /* Interface to upper layer */ |
| 181 | char regname[35]; /* Name used for request_region */ | 181 | char regname[35]; /* Name used for request_region */ |
| 182 | } act2000_card; | 182 | } act2000_card; |
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 6f9afcd5ca4..b133378d4dc 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c | |||
| @@ -801,6 +801,16 @@ static void closecard(int cardnr) | |||
| 801 | ll_unload(csta); | 801 | ll_unload(csta); |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | static irqreturn_t card_irq(int intno, void *dev_id) | ||
| 805 | { | ||
| 806 | struct IsdnCardState *cs = dev_id; | ||
| 807 | irqreturn_t ret = cs->irq_func(intno, cs); | ||
| 808 | |||
| 809 | if (ret == IRQ_HANDLED) | ||
| 810 | cs->irq_cnt++; | ||
| 811 | return ret; | ||
| 812 | } | ||
| 813 | |||
| 804 | static int init_card(struct IsdnCardState *cs) | 814 | static int init_card(struct IsdnCardState *cs) |
| 805 | { | 815 | { |
| 806 | int irq_cnt, cnt = 3, ret; | 816 | int irq_cnt, cnt = 3, ret; |
| @@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) | |||
| 809 | ret = cs->cardmsg(cs, CARD_INIT, NULL); | 819 | ret = cs->cardmsg(cs, CARD_INIT, NULL); |
| 810 | return(ret); | 820 | return(ret); |
| 811 | } | 821 | } |
| 812 | irq_cnt = kstat_irqs(cs->irq); | 822 | irq_cnt = cs->irq_cnt = 0; |
| 813 | printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], | 823 | printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], |
| 814 | cs->irq, irq_cnt); | 824 | cs->irq, irq_cnt); |
| 815 | if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { | 825 | if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { |
| 816 | printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", | 826 | printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", |
| 817 | cs->irq); | 827 | cs->irq); |
| 818 | return 1; | 828 | return 1; |
| @@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) | |||
| 822 | /* Timeout 10ms */ | 832 | /* Timeout 10ms */ |
| 823 | msleep(10); | 833 | msleep(10); |
| 824 | printk(KERN_INFO "%s: IRQ %d count %d\n", | 834 | printk(KERN_INFO "%s: IRQ %d count %d\n", |
| 825 | CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); | 835 | CardType[cs->typ], cs->irq, cs->irq_cnt); |
| 826 | if (kstat_irqs(cs->irq) == irq_cnt) { | 836 | if (cs->irq_cnt == irq_cnt) { |
| 827 | printk(KERN_WARNING | 837 | printk(KERN_WARNING |
| 828 | "%s: IRQ(%d) getting no interrupts during init %d\n", | 838 | "%s: IRQ(%d) getting no interrupts during init %d\n", |
| 829 | CardType[cs->typ], cs->irq, 4 - cnt); | 839 | CardType[cs->typ], cs->irq, 4 - cnt); |
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 832a87855ff..32ab3924aa7 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h | |||
| @@ -959,6 +959,7 @@ struct IsdnCardState { | |||
| 959 | u_long event; | 959 | u_long event; |
| 960 | struct work_struct tqueue; | 960 | struct work_struct tqueue; |
| 961 | struct timer_list dbusytimer; | 961 | struct timer_list dbusytimer; |
| 962 | unsigned int irq_cnt; | ||
| 962 | #ifdef ERROR_STATISTIC | 963 | #ifdef ERROR_STATISTIC |
| 963 | int err_crc; | 964 | int err_crc; |
| 964 | int err_tx; | 965 | int err_tx; |
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 097f24d8bce..b9fda7018ce 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
| @@ -78,7 +78,7 @@ struct sih { | |||
| 78 | u8 irq_lines; /* number of supported irq lines */ | 78 | u8 irq_lines; /* number of supported irq lines */ |
| 79 | 79 | ||
| 80 | /* SIR ignored -- set interrupt, for testing only */ | 80 | /* SIR ignored -- set interrupt, for testing only */ |
| 81 | struct irq_data { | 81 | struct sih_irq_data { |
| 82 | u8 isr_offset; | 82 | u8 isr_offset; |
| 83 | u8 imr_offset; | 83 | u8 imr_offset; |
| 84 | } mask[2]; | 84 | } mask[2]; |
| @@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
| 810 | twl4030_irq_chip = dummy_irq_chip; | 810 | twl4030_irq_chip = dummy_irq_chip; |
| 811 | twl4030_irq_chip.name = "twl4030"; | 811 | twl4030_irq_chip.name = "twl4030"; |
| 812 | 812 | ||
| 813 | twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; | 813 | twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; |
| 814 | 814 | ||
| 815 | for (i = irq_base; i < irq_end; i++) { | 815 | for (i = irq_base; i < irq_end; i++) { |
| 816 | set_irq_chip_and_handler(i, &twl4030_irq_chip, | 816 | set_irq_chip_and_handler(i, &twl4030_irq_chip, |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708074c..3de3a436a43 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
| @@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
| 1221 | } | 1221 | } |
| 1222 | } | 1222 | } |
| 1223 | 1223 | ||
| 1224 | void dmar_msi_unmask(unsigned int irq) | 1224 | void dmar_msi_unmask(struct irq_data *data) |
| 1225 | { | 1225 | { |
| 1226 | struct intel_iommu *iommu = get_irq_data(irq); | 1226 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
| 1227 | unsigned long flag; | 1227 | unsigned long flag; |
| 1228 | 1228 | ||
| 1229 | /* unmask it */ | 1229 | /* unmask it */ |
| @@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq) | |||
| 1234 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1234 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1235 | } | 1235 | } |
| 1236 | 1236 | ||
| 1237 | void dmar_msi_mask(unsigned int irq) | 1237 | void dmar_msi_mask(struct irq_data *data) |
| 1238 | { | 1238 | { |
| 1239 | unsigned long flag; | 1239 | unsigned long flag; |
| 1240 | struct intel_iommu *iommu = get_irq_data(irq); | 1240 | struct intel_iommu *iommu = irq_data_get_irq_data(data); |
| 1241 | 1241 | ||
| 1242 | /* mask it */ | 1242 | /* mask it */ |
| 1243 | spin_lock_irqsave(&iommu->register_lock, flag); | 1243 | spin_lock_irqsave(&iommu->register_lock, flag); |
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 98abf8b9129..834842aa5bb 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
| @@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
| 57 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | void mask_ht_irq(unsigned int irq) | 60 | void mask_ht_irq(struct irq_data *data) |
| 61 | { | 61 | { |
| 62 | struct ht_irq_cfg *cfg; | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
| 63 | struct ht_irq_msg msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
| 64 | |||
| 65 | cfg = get_irq_data(irq); | ||
| 66 | 64 | ||
| 67 | msg = cfg->msg; | ||
| 68 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
| 69 | write_ht_irq_msg(irq, &msg); | 66 | write_ht_irq_msg(data->irq, &msg); |
| 70 | } | 67 | } |
| 71 | 68 | ||
| 72 | void unmask_ht_irq(unsigned int irq) | 69 | void unmask_ht_irq(struct irq_data *data) |
| 73 | { | 70 | { |
| 74 | struct ht_irq_cfg *cfg; | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); |
| 75 | struct ht_irq_msg msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
| 76 | |||
| 77 | cfg = get_irq_data(irq); | ||
| 78 | 73 | ||
| 79 | msg = cfg->msg; | ||
| 80 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
| 81 | write_ht_irq_msg(irq, &msg); | 75 | write_ht_irq_msg(data->irq, &msg); |
| 82 | } | 76 | } |
| 83 | 77 | ||
| 84 | /** | 78 | /** |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index fd1d2867cdc..ec87cd66f3e 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
| @@ -46,109 +46,24 @@ static __init int setup_intremap(char *str) | |||
| 46 | } | 46 | } |
| 47 | early_param("intremap", setup_intremap); | 47 | early_param("intremap", setup_intremap); |
| 48 | 48 | ||
| 49 | struct irq_2_iommu { | ||
| 50 | struct intel_iommu *iommu; | ||
| 51 | u16 irte_index; | ||
| 52 | u16 sub_handle; | ||
| 53 | u8 irte_mask; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 57 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) | ||
| 58 | { | ||
| 59 | struct irq_2_iommu *iommu; | ||
| 60 | |||
| 61 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | ||
| 62 | printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); | ||
| 63 | |||
| 64 | return iommu; | ||
| 65 | } | ||
| 66 | |||
| 67 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
| 68 | { | ||
| 69 | struct irq_desc *desc; | ||
| 70 | |||
| 71 | desc = irq_to_desc(irq); | ||
| 72 | |||
| 73 | if (WARN_ON_ONCE(!desc)) | ||
| 74 | return NULL; | ||
| 75 | |||
| 76 | return desc->irq_2_iommu; | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
| 80 | { | ||
| 81 | struct irq_desc *desc; | ||
| 82 | struct irq_2_iommu *irq_iommu; | ||
| 83 | |||
| 84 | desc = irq_to_desc(irq); | ||
| 85 | if (!desc) { | ||
| 86 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
| 87 | return NULL; | ||
| 88 | } | ||
| 89 | |||
| 90 | irq_iommu = desc->irq_2_iommu; | ||
| 91 | |||
| 92 | if (!irq_iommu) | ||
| 93 | desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); | ||
| 94 | |||
| 95 | return desc->irq_2_iommu; | ||
| 96 | } | ||
| 97 | |||
| 98 | #else /* !CONFIG_SPARSE_IRQ */ | ||
| 99 | |||
| 100 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
| 101 | |||
| 102 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
| 103 | { | ||
| 104 | if (irq < nr_irqs) | ||
| 105 | return &irq_2_iommuX[irq]; | ||
| 106 | |||
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
| 110 | { | ||
| 111 | return irq_2_iommu(irq); | ||
| 112 | } | ||
| 113 | #endif | ||
| 114 | |||
| 115 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 49 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
| 116 | 50 | ||
| 117 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
| 118 | { | ||
| 119 | struct irq_2_iommu *irq_iommu; | ||
| 120 | |||
| 121 | irq_iommu = irq_2_iommu(irq); | ||
| 122 | |||
| 123 | if (!irq_iommu) | ||
| 124 | return NULL; | ||
| 125 | |||
| 126 | if (!irq_iommu->iommu) | ||
| 127 | return NULL; | ||
| 128 | |||
| 129 | return irq_iommu; | ||
| 130 | } | ||
| 131 | |||
| 132 | int irq_remapped(int irq) | ||
| 133 | { | 52 | { |
| 134 | return valid_irq_2_iommu(irq) != NULL; | 53 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
| 54 | return cfg ? &cfg->irq_2_iommu : NULL; | ||
| 135 | } | 55 | } |
| 136 | 56 | ||
| 137 | int get_irte(int irq, struct irte *entry) | 57 | int get_irte(int irq, struct irte *entry) |
| 138 | { | 58 | { |
| 139 | int index; | 59 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 140 | struct irq_2_iommu *irq_iommu; | ||
| 141 | unsigned long flags; | 60 | unsigned long flags; |
| 61 | int index; | ||
| 142 | 62 | ||
| 143 | if (!entry) | 63 | if (!entry || !irq_iommu) |
| 144 | return -1; | 64 | return -1; |
| 145 | 65 | ||
| 146 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 66 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 147 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 148 | if (!irq_iommu) { | ||
| 149 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 150 | return -1; | ||
| 151 | } | ||
| 152 | 67 | ||
| 153 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 68 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 154 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 69 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
| @@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry) | |||
| 160 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | 75 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
| 161 | { | 76 | { |
| 162 | struct ir_table *table = iommu->ir_table; | 77 | struct ir_table *table = iommu->ir_table; |
| 163 | struct irq_2_iommu *irq_iommu; | 78 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 164 | u16 index, start_index; | 79 | u16 index, start_index; |
| 165 | unsigned int mask = 0; | 80 | unsigned int mask = 0; |
| 166 | unsigned long flags; | 81 | unsigned long flags; |
| 167 | int i; | 82 | int i; |
| 168 | 83 | ||
| 169 | if (!count) | 84 | if (!count || !irq_iommu) |
| 170 | return -1; | ||
| 171 | |||
| 172 | #ifndef CONFIG_SPARSE_IRQ | ||
| 173 | /* protect irq_2_iommu_alloc later */ | ||
| 174 | if (irq >= nr_irqs) | ||
| 175 | return -1; | 85 | return -1; |
| 176 | #endif | ||
| 177 | 86 | ||
| 178 | /* | 87 | /* |
| 179 | * start the IRTE search from index 0. | 88 | * start the IRTE search from index 0. |
| @@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
| 214 | for (i = index; i < index + count; i++) | 123 | for (i = index; i < index + count; i++) |
| 215 | table->base[i].present = 1; | 124 | table->base[i].present = 1; |
| 216 | 125 | ||
| 217 | irq_iommu = irq_2_iommu_alloc(irq); | ||
| 218 | if (!irq_iommu) { | ||
| 219 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 220 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
| 221 | return -1; | ||
| 222 | } | ||
| 223 | |||
| 224 | irq_iommu->iommu = iommu; | 126 | irq_iommu->iommu = iommu; |
| 225 | irq_iommu->irte_index = index; | 127 | irq_iommu->irte_index = index; |
| 226 | irq_iommu->sub_handle = 0; | 128 | irq_iommu->sub_handle = 0; |
| @@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) | |||
| 244 | 146 | ||
| 245 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | 147 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) |
| 246 | { | 148 | { |
| 247 | int index; | 149 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 248 | struct irq_2_iommu *irq_iommu; | ||
| 249 | unsigned long flags; | 150 | unsigned long flags; |
| 151 | int index; | ||
| 250 | 152 | ||
| 251 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 153 | if (!irq_iommu) |
| 252 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 253 | if (!irq_iommu) { | ||
| 254 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 255 | return -1; | 154 | return -1; |
| 256 | } | ||
| 257 | 155 | ||
| 156 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 258 | *sub_handle = irq_iommu->sub_handle; | 157 | *sub_handle = irq_iommu->sub_handle; |
| 259 | index = irq_iommu->irte_index; | 158 | index = irq_iommu->irte_index; |
| 260 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 159 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| @@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
| 263 | 162 | ||
| 264 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 163 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
| 265 | { | 164 | { |
| 266 | struct irq_2_iommu *irq_iommu; | 165 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 267 | unsigned long flags; | 166 | unsigned long flags; |
| 268 | 167 | ||
| 269 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 168 | if (!irq_iommu) |
| 270 | |||
| 271 | irq_iommu = irq_2_iommu_alloc(irq); | ||
| 272 | |||
| 273 | if (!irq_iommu) { | ||
| 274 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 275 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
| 276 | return -1; | 169 | return -1; |
| 277 | } | 170 | |
| 171 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 278 | 172 | ||
| 279 | irq_iommu->iommu = iommu; | 173 | irq_iommu->iommu = iommu; |
| 280 | irq_iommu->irte_index = index; | 174 | irq_iommu->irte_index = index; |
| @@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
| 286 | return 0; | 180 | return 0; |
| 287 | } | 181 | } |
| 288 | 182 | ||
| 289 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | ||
| 290 | { | ||
| 291 | struct irq_2_iommu *irq_iommu; | ||
| 292 | unsigned long flags; | ||
| 293 | |||
| 294 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 295 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 296 | if (!irq_iommu) { | ||
| 297 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 298 | return -1; | ||
| 299 | } | ||
| 300 | |||
| 301 | irq_iommu->iommu = NULL; | ||
| 302 | irq_iommu->irte_index = 0; | ||
| 303 | irq_iommu->sub_handle = 0; | ||
| 304 | irq_2_iommu(irq)->irte_mask = 0; | ||
| 305 | |||
| 306 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | int modify_irte(int irq, struct irte *irte_modified) | 183 | int modify_irte(int irq, struct irte *irte_modified) |
| 312 | { | 184 | { |
| 313 | int rc; | 185 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 314 | int index; | ||
| 315 | struct irte *irte; | ||
| 316 | struct intel_iommu *iommu; | 186 | struct intel_iommu *iommu; |
| 317 | struct irq_2_iommu *irq_iommu; | ||
| 318 | unsigned long flags; | 187 | unsigned long flags; |
| 188 | struct irte *irte; | ||
| 189 | int rc, index; | ||
| 319 | 190 | ||
| 320 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 191 | if (!irq_iommu) |
| 321 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 322 | if (!irq_iommu) { | ||
| 323 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 324 | return -1; | 192 | return -1; |
| 325 | } | 193 | |
| 194 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 326 | 195 | ||
| 327 | iommu = irq_iommu->iommu; | 196 | iommu = irq_iommu->iommu; |
| 328 | 197 | ||
| @@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
| 339 | return rc; | 208 | return rc; |
| 340 | } | 209 | } |
| 341 | 210 | ||
| 342 | int flush_irte(int irq) | ||
| 343 | { | ||
| 344 | int rc; | ||
| 345 | int index; | ||
| 346 | struct intel_iommu *iommu; | ||
| 347 | struct irq_2_iommu *irq_iommu; | ||
| 348 | unsigned long flags; | ||
| 349 | |||
| 350 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 351 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 352 | if (!irq_iommu) { | ||
| 353 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 354 | return -1; | ||
| 355 | } | ||
| 356 | |||
| 357 | iommu = irq_iommu->iommu; | ||
| 358 | |||
| 359 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
| 360 | |||
| 361 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
| 362 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 363 | |||
| 364 | return rc; | ||
| 365 | } | ||
| 366 | |||
| 367 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) | 211 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
| 368 | { | 212 | { |
| 369 | int i; | 213 | int i; |
| @@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) | |||
| 420 | 264 | ||
| 421 | int free_irte(int irq) | 265 | int free_irte(int irq) |
| 422 | { | 266 | { |
| 423 | int rc = 0; | 267 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
| 424 | struct irq_2_iommu *irq_iommu; | ||
| 425 | unsigned long flags; | 268 | unsigned long flags; |
| 269 | int rc; | ||
| 426 | 270 | ||
| 427 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 271 | if (!irq_iommu) |
| 428 | irq_iommu = valid_irq_2_iommu(irq); | ||
| 429 | if (!irq_iommu) { | ||
| 430 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | ||
| 431 | return -1; | 272 | return -1; |
| 432 | } | 273 | |
| 274 | spin_lock_irqsave(&irq_2_ir_lock, flags); | ||
| 433 | 275 | ||
| 434 | rc = clear_entries(irq_iommu); | 276 | rc = clear_entries(irq_iommu); |
| 435 | 277 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 69b7be33b3a..5fcf5aec680 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) | |||
| 170 | desc->masked = __msix_mask_irq(desc, flag); | 170 | desc->masked = __msix_mask_irq(desc, flag); |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | static void msi_set_mask_bit(unsigned irq, u32 flag) | 173 | static void msi_set_mask_bit(struct irq_data *data, u32 flag) |
| 174 | { | 174 | { |
| 175 | struct msi_desc *desc = get_irq_msi(irq); | 175 | struct msi_desc *desc = irq_data_get_msi(data); |
| 176 | 176 | ||
| 177 | if (desc->msi_attrib.is_msix) { | 177 | if (desc->msi_attrib.is_msix) { |
| 178 | msix_mask_irq(desc, flag); | 178 | msix_mask_irq(desc, flag); |
| 179 | readl(desc->mask_base); /* Flush write to device */ | 179 | readl(desc->mask_base); /* Flush write to device */ |
| 180 | } else { | 180 | } else { |
| 181 | unsigned offset = irq - desc->dev->irq; | 181 | unsigned offset = data->irq - desc->dev->irq; |
| 182 | msi_mask_irq(desc, 1 << offset, flag << offset); | 182 | msi_mask_irq(desc, 1 << offset, flag << offset); |
| 183 | } | 183 | } |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | void mask_msi_irq(unsigned int irq) | 186 | void mask_msi_irq(struct irq_data *data) |
| 187 | { | 187 | { |
| 188 | msi_set_mask_bit(irq, 1); | 188 | msi_set_mask_bit(data, 1); |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | void unmask_msi_irq(unsigned int irq) | 191 | void unmask_msi_irq(struct irq_data *data) |
| 192 | { | 192 | { |
| 193 | msi_set_mask_bit(irq, 0); | 193 | msi_set_mask_bit(data, 0); |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 196 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 197 | { | 197 | { |
| 198 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 199 | |||
| 200 | BUG_ON(entry->dev->current_state != PCI_D0); | 198 | BUG_ON(entry->dev->current_state != PCI_D0); |
| 201 | 199 | ||
| 202 | if (entry->msi_attrib.is_msix) { | 200 | if (entry->msi_attrib.is_msix) { |
| @@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 227 | 225 | ||
| 228 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 226 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 229 | { | 227 | { |
| 230 | struct irq_desc *desc = irq_to_desc(irq); | 228 | struct msi_desc *entry = get_irq_msi(irq); |
| 231 | 229 | ||
| 232 | read_msi_msg_desc(desc, msg); | 230 | __read_msi_msg(entry, msg); |
| 233 | } | 231 | } |
| 234 | 232 | ||
| 235 | void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 233 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 236 | { | 234 | { |
| 237 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 238 | |||
| 239 | /* Assert that the cache is valid, assuming that | 235 | /* Assert that the cache is valid, assuming that |
| 240 | * valid messages are not all-zeroes. */ | 236 | * valid messages are not all-zeroes. */ |
| 241 | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | | 237 | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | |
| @@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 246 | 242 | ||
| 247 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | 243 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 248 | { | 244 | { |
| 249 | struct irq_desc *desc = irq_to_desc(irq); | 245 | struct msi_desc *entry = get_irq_msi(irq); |
| 250 | 246 | ||
| 251 | get_cached_msi_msg_desc(desc, msg); | 247 | __get_cached_msi_msg(entry, msg); |
| 252 | } | 248 | } |
| 253 | 249 | ||
| 254 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 250 | void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
| 255 | { | 251 | { |
| 256 | struct msi_desc *entry = get_irq_desc_msi(desc); | ||
| 257 | |||
| 258 | if (entry->dev->current_state != PCI_D0) { | 252 | if (entry->dev->current_state != PCI_D0) { |
| 259 | /* Don't touch the hardware now */ | 253 | /* Don't touch the hardware now */ |
| 260 | } else if (entry->msi_attrib.is_msix) { | 254 | } else if (entry->msi_attrib.is_msix) { |
| @@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | |||
| 292 | 286 | ||
| 293 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 287 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
| 294 | { | 288 | { |
| 295 | struct irq_desc *desc = irq_to_desc(irq); | 289 | struct msi_desc *entry = get_irq_msi(irq); |
| 296 | 290 | ||
| 297 | write_msi_msg_desc(desc, msg); | 291 | __write_msi_msg(entry, msg); |
| 298 | } | 292 | } |
| 299 | 293 | ||
| 300 | static void free_msi_irqs(struct pci_dev *dev) | 294 | static void free_msi_irqs(struct pci_dev *dev) |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 13365ba3521..7d24b0d94ed 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -338,30 +338,29 @@ static void unmask_evtchn(int port) | |||
| 338 | 338 | ||
| 339 | static int find_unbound_irq(void) | 339 | static int find_unbound_irq(void) |
| 340 | { | 340 | { |
| 341 | int irq; | 341 | struct irq_data *data; |
| 342 | struct irq_desc *desc; | 342 | int irq, res; |
| 343 | 343 | ||
| 344 | for (irq = 0; irq < nr_irqs; irq++) { | 344 | for (irq = 0; irq < nr_irqs; irq++) { |
| 345 | desc = irq_to_desc(irq); | 345 | data = irq_get_irq_data(irq); |
| 346 | /* only 0->15 have init'd desc; handle irq > 16 */ | 346 | /* only 0->15 have init'd desc; handle irq > 16 */ |
| 347 | if (desc == NULL) | 347 | if (!data) |
| 348 | break; | 348 | break; |
| 349 | if (desc->chip == &no_irq_chip) | 349 | if (data->chip == &no_irq_chip) |
| 350 | break; | 350 | break; |
| 351 | if (desc->chip != &xen_dynamic_chip) | 351 | if (data->chip != &xen_dynamic_chip) |
| 352 | continue; | 352 | continue; |
| 353 | if (irq_info[irq].type == IRQT_UNBOUND) | 353 | if (irq_info[irq].type == IRQT_UNBOUND) |
| 354 | break; | 354 | return irq; |
| 355 | } | 355 | } |
| 356 | 356 | ||
| 357 | if (irq == nr_irqs) | 357 | if (irq == nr_irqs) |
| 358 | panic("No available IRQ to bind to: increase nr_irqs!\n"); | 358 | panic("No available IRQ to bind to: increase nr_irqs!\n"); |
| 359 | 359 | ||
| 360 | desc = irq_to_desc_alloc_node(irq, 0); | 360 | res = irq_alloc_desc_at(irq, 0); |
| 361 | if (WARN_ON(desc == NULL)) | ||
| 362 | return -1; | ||
| 363 | 361 | ||
| 364 | dynamic_irq_init_keep_chip_data(irq); | 362 | if (WARN_ON(res != irq)) |
| 363 | return -1; | ||
| 365 | 364 | ||
| 366 | return irq; | 365 | return irq; |
| 367 | } | 366 | } |
| @@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq) | |||
| 495 | if (irq_info[irq].type != IRQT_UNBOUND) { | 494 | if (irq_info[irq].type != IRQT_UNBOUND) { |
| 496 | irq_info[irq] = mk_unbound_info(); | 495 | irq_info[irq] = mk_unbound_info(); |
| 497 | 496 | ||
| 498 | dynamic_irq_cleanup(irq); | 497 | irq_free_desc(irq); |
| 499 | } | 498 | } |
| 500 | 499 | ||
| 501 | spin_unlock(&irq_mapping_update_lock); | 500 | spin_unlock(&irq_mapping_update_lock); |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7cecc90ed3..51651b76d40 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
| @@ -106,6 +106,7 @@ struct irte { | |||
| 106 | __u64 high; | 106 | __u64 high; |
| 107 | }; | 107 | }; |
| 108 | }; | 108 | }; |
| 109 | |||
| 109 | #ifdef CONFIG_INTR_REMAP | 110 | #ifdef CONFIG_INTR_REMAP |
| 110 | extern int intr_remapping_enabled; | 111 | extern int intr_remapping_enabled; |
| 111 | extern int intr_remapping_supported(void); | 112 | extern int intr_remapping_supported(void); |
| @@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | |||
| 119 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | 120 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, |
| 120 | u16 sub_handle); | 121 | u16 sub_handle); |
| 121 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); | 122 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); |
| 122 | extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); | ||
| 123 | extern int flush_irte(int irq); | ||
| 124 | extern int free_irte(int irq); | 123 | extern int free_irte(int irq); |
| 125 | 124 | ||
| 126 | extern int irq_remapped(int irq); | ||
| 127 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | 125 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); |
| 128 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | 126 | extern struct intel_iommu *map_ioapic_to_ir(int apic); |
| 129 | extern struct intel_iommu *map_hpet_to_ir(u8 id); | 127 | extern struct intel_iommu *map_hpet_to_ir(u8 id); |
| @@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 177 | return 0; | 175 | return 0; |
| 178 | } | 176 | } |
| 179 | 177 | ||
| 180 | #define irq_remapped(irq) (0) | ||
| 181 | #define enable_intr_remapping(mode) (-1) | 178 | #define enable_intr_remapping(mode) (-1) |
| 182 | #define disable_intr_remapping() (0) | 179 | #define disable_intr_remapping() (0) |
| 183 | #define reenable_intr_remapping(mode) (0) | 180 | #define reenable_intr_remapping(mode) (0) |
| @@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 187 | /* Can't use the common MSI interrupt functions | 184 | /* Can't use the common MSI interrupt functions |
| 188 | * since DMAR is not a pci device | 185 | * since DMAR is not a pci device |
| 189 | */ | 186 | */ |
| 190 | extern void dmar_msi_unmask(unsigned int irq); | 187 | struct irq_data; |
| 191 | extern void dmar_msi_mask(unsigned int irq); | 188 | extern void dmar_msi_unmask(struct irq_data *data); |
| 189 | extern void dmar_msi_mask(struct irq_data *data); | ||
| 192 | extern void dmar_msi_read(int irq, struct msi_msg *msg); | 190 | extern void dmar_msi_read(int irq, struct msi_msg *msg); |
| 193 | extern void dmar_msi_write(int irq, struct msi_msg *msg); | 191 | extern void dmar_msi_write(int irq, struct msi_msg *msg); |
| 194 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 192 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
diff --git a/include/linux/htirq.h b/include/linux/htirq.h index c96ea46737d..70a1dbbf209 100644 --- a/include/linux/htirq.h +++ b/include/linux/htirq.h | |||
| @@ -9,8 +9,9 @@ struct ht_irq_msg { | |||
| 9 | /* Helper functions.. */ | 9 | /* Helper functions.. */ |
| 10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
| 11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
| 12 | void mask_ht_irq(unsigned int irq); | 12 | struct irq_data; |
| 13 | void unmask_ht_irq(unsigned int irq); | 13 | void mask_ht_irq(struct irq_data *data); |
| 14 | void unmask_ht_irq(struct irq_data *data); | ||
| 14 | 15 | ||
| 15 | /* The arch hook for getting things started */ | 16 | /* The arch hook for getting things started */ |
| 16 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); | 17 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 531495db170..414328577ce 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -647,11 +647,8 @@ static inline void init_irq_proc(void) | |||
| 647 | struct seq_file; | 647 | struct seq_file; |
| 648 | int show_interrupts(struct seq_file *p, void *v); | 648 | int show_interrupts(struct seq_file *p, void *v); |
| 649 | 649 | ||
| 650 | struct irq_desc; | ||
| 651 | |||
| 652 | extern int early_irq_init(void); | 650 | extern int early_irq_init(void); |
| 653 | extern int arch_probe_nr_irqs(void); | 651 | extern int arch_probe_nr_irqs(void); |
| 654 | extern int arch_early_irq_init(void); | 652 | extern int arch_early_irq_init(void); |
| 655 | extern int arch_init_chip_data(struct irq_desc *desc, int node); | ||
| 656 | 653 | ||
| 657 | #endif | 654 | #endif |
diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b..e9639115dff 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
| 72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ | 72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ |
| 73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ | 73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ |
| 74 | 74 | ||
| 75 | #define IRQF_MODIFY_MASK \ | ||
| 76 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | ||
| 77 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) | ||
| 78 | |||
| 75 | #ifdef CONFIG_IRQ_PER_CPU | 79 | #ifdef CONFIG_IRQ_PER_CPU |
| 76 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 80 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
| 77 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 81 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
| @@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
| 80 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING | 84 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING |
| 81 | #endif | 85 | #endif |
| 82 | 86 | ||
| 83 | struct proc_dir_entry; | ||
| 84 | struct msi_desc; | 87 | struct msi_desc; |
| 85 | 88 | ||
| 86 | /** | 89 | /** |
| 90 | * struct irq_data - per irq and irq chip data passed down to chip functions | ||
| 91 | * @irq: interrupt number | ||
| 92 | * @node: node index useful for balancing | ||
| 93 | * @chip: low level interrupt hardware access | ||
| 94 | * @handler_data: per-IRQ data for the irq_chip methods | ||
| 95 | * @chip_data: platform-specific per-chip private data for the chip | ||
| 96 | * methods, to allow shared chip implementations | ||
| 97 | * @msi_desc: MSI descriptor | ||
| 98 | * @affinity: IRQ affinity on SMP | ||
| 99 | * | ||
| 100 | * The fields here need to overlay the ones in irq_desc until we | ||
| 101 | * cleaned up the direct references and switched everything over to | ||
| 102 | * irq_data. | ||
| 103 | */ | ||
| 104 | struct irq_data { | ||
| 105 | unsigned int irq; | ||
| 106 | unsigned int node; | ||
| 107 | struct irq_chip *chip; | ||
| 108 | void *handler_data; | ||
| 109 | void *chip_data; | ||
| 110 | struct msi_desc *msi_desc; | ||
| 111 | #ifdef CONFIG_SMP | ||
| 112 | cpumask_var_t affinity; | ||
| 113 | #endif | ||
| 114 | }; | ||
| 115 | |||
| 116 | /** | ||
| 87 | * struct irq_chip - hardware interrupt chip descriptor | 117 | * struct irq_chip - hardware interrupt chip descriptor |
| 88 | * | 118 | * |
| 89 | * @name: name for /proc/interrupts | 119 | * @name: name for /proc/interrupts |
| 90 | * @startup: start up the interrupt (defaults to ->enable if NULL) | 120 | * @startup: deprecated, replaced by irq_startup |
| 91 | * @shutdown: shut down the interrupt (defaults to ->disable if NULL) | 121 | * @shutdown: deprecated, replaced by irq_shutdown |
| 92 | * @enable: enable the interrupt (defaults to chip->unmask if NULL) | 122 | * @enable: deprecated, replaced by irq_enable |
| 93 | * @disable: disable the interrupt | 123 | * @disable: deprecated, replaced by irq_disable |
| 94 | * @ack: start of a new interrupt | 124 | * @ack: deprecated, replaced by irq_ack |
| 95 | * @mask: mask an interrupt source | 125 | * @mask: deprecated, replaced by irq_mask |
| 96 | * @mask_ack: ack and mask an interrupt source | 126 | * @mask_ack: deprecated, replaced by irq_mask_ack |
| 97 | * @unmask: unmask an interrupt source | 127 | * @unmask: deprecated, replaced by irq_unmask |
| 98 | * @eoi: end of interrupt - chip level | 128 | * @eoi: deprecated, replaced by irq_eoi |
| 99 | * @end: end of interrupt - flow level | 129 | * @end: deprecated, will go away with __do_IRQ() |
| 100 | * @set_affinity: set the CPU affinity on SMP machines | 130 | * @set_affinity: deprecated, replaced by irq_set_affinity |
| 101 | * @retrigger: resend an IRQ to the CPU | 131 | * @retrigger: deprecated, replaced by irq_retrigger |
| 102 | * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 132 | * @set_type: deprecated, replaced by irq_set_type |
| 103 | * @set_wake: enable/disable power-management wake-on of an IRQ | 133 | * @set_wake: deprecated, replaced by irq_wake |
| 134 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
| 135 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
| 104 | * | 136 | * |
| 105 | * @bus_lock: function to lock access to slow bus (i2c) chips | 137 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
| 106 | * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips | 138 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
| 139 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | ||
| 140 | * @irq_disable: disable the interrupt | ||
| 141 | * @irq_ack: start of a new interrupt | ||
| 142 | * @irq_mask: mask an interrupt source | ||
| 143 | * @irq_mask_ack: ack and mask an interrupt source | ||
| 144 | * @irq_unmask: unmask an interrupt source | ||
| 145 | * @irq_eoi: end of interrupt | ||
| 146 | * @irq_set_affinity: set the CPU affinity on SMP machines | ||
| 147 | * @irq_retrigger: resend an IRQ to the CPU | ||
| 148 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | ||
| 149 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | ||
| 150 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | ||
| 151 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | ||
| 107 | * | 152 | * |
| 108 | * @release: release function solely used by UML | 153 | * @release: release function solely used by UML |
| 109 | * @typename: obsoleted by name, kept as migration helper | ||
| 110 | */ | 154 | */ |
| 111 | struct irq_chip { | 155 | struct irq_chip { |
| 112 | const char *name; | 156 | const char *name; |
| 157 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 113 | unsigned int (*startup)(unsigned int irq); | 158 | unsigned int (*startup)(unsigned int irq); |
| 114 | void (*shutdown)(unsigned int irq); | 159 | void (*shutdown)(unsigned int irq); |
| 115 | void (*enable)(unsigned int irq); | 160 | void (*enable)(unsigned int irq); |
| @@ -130,154 +175,66 @@ struct irq_chip { | |||
| 130 | 175 | ||
| 131 | void (*bus_lock)(unsigned int irq); | 176 | void (*bus_lock)(unsigned int irq); |
| 132 | void (*bus_sync_unlock)(unsigned int irq); | 177 | void (*bus_sync_unlock)(unsigned int irq); |
| 178 | #endif | ||
| 179 | unsigned int (*irq_startup)(struct irq_data *data); | ||
| 180 | void (*irq_shutdown)(struct irq_data *data); | ||
| 181 | void (*irq_enable)(struct irq_data *data); | ||
| 182 | void (*irq_disable)(struct irq_data *data); | ||
| 183 | |||
| 184 | void (*irq_ack)(struct irq_data *data); | ||
| 185 | void (*irq_mask)(struct irq_data *data); | ||
| 186 | void (*irq_mask_ack)(struct irq_data *data); | ||
| 187 | void (*irq_unmask)(struct irq_data *data); | ||
| 188 | void (*irq_eoi)(struct irq_data *data); | ||
| 189 | |||
| 190 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | ||
| 191 | int (*irq_retrigger)(struct irq_data *data); | ||
| 192 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | ||
| 193 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | ||
| 194 | |||
| 195 | void (*irq_bus_lock)(struct irq_data *data); | ||
| 196 | void (*irq_bus_sync_unlock)(struct irq_data *data); | ||
| 133 | 197 | ||
| 134 | /* Currently used only by UML, might disappear one day.*/ | 198 | /* Currently used only by UML, might disappear one day.*/ |
| 135 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 199 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
| 136 | void (*release)(unsigned int irq, void *dev_id); | 200 | void (*release)(unsigned int irq, void *dev_id); |
| 137 | #endif | 201 | #endif |
| 138 | /* | ||
| 139 | * For compatibility, ->typename is copied into ->name. | ||
| 140 | * Will disappear. | ||
| 141 | */ | ||
| 142 | const char *typename; | ||
| 143 | }; | 202 | }; |
| 144 | 203 | ||
| 145 | struct timer_rand_state; | 204 | /* This include will go away once we isolated irq_desc usage to core code */ |
| 146 | struct irq_2_iommu; | 205 | #include <linux/irqdesc.h> |
| 147 | /** | ||
| 148 | * struct irq_desc - interrupt descriptor | ||
| 149 | * @irq: interrupt number for this descriptor | ||
| 150 | * @timer_rand_state: pointer to timer rand state struct | ||
| 151 | * @kstat_irqs: irq stats per cpu | ||
| 152 | * @irq_2_iommu: iommu with this irq | ||
| 153 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
| 154 | * @chip: low level interrupt hardware access | ||
| 155 | * @msi_desc: MSI descriptor | ||
| 156 | * @handler_data: per-IRQ data for the irq_chip methods | ||
| 157 | * @chip_data: platform-specific per-chip private data for the chip | ||
| 158 | * methods, to allow shared chip implementations | ||
| 159 | * @action: the irq action chain | ||
| 160 | * @status: status information | ||
| 161 | * @depth: disable-depth, for nested irq_disable() calls | ||
| 162 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
| 163 | * @irq_count: stats field to detect stalled irqs | ||
| 164 | * @last_unhandled: aging timer for unhandled count | ||
| 165 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
| 166 | * @lock: locking for SMP | ||
| 167 | * @affinity: IRQ affinity on SMP | ||
| 168 | * @node: node index useful for balancing | ||
| 169 | * @pending_mask: pending rebalanced interrupts | ||
| 170 | * @threads_active: number of irqaction threads currently running | ||
| 171 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
| 172 | * @dir: /proc/irq/ procfs entry | ||
| 173 | * @name: flow handler name for /proc/interrupts output | ||
| 174 | */ | ||
| 175 | struct irq_desc { | ||
| 176 | unsigned int irq; | ||
| 177 | struct timer_rand_state *timer_rand_state; | ||
| 178 | unsigned int *kstat_irqs; | ||
| 179 | #ifdef CONFIG_INTR_REMAP | ||
| 180 | struct irq_2_iommu *irq_2_iommu; | ||
| 181 | #endif | ||
| 182 | irq_flow_handler_t handle_irq; | ||
| 183 | struct irq_chip *chip; | ||
| 184 | struct msi_desc *msi_desc; | ||
| 185 | void *handler_data; | ||
| 186 | void *chip_data; | ||
| 187 | struct irqaction *action; /* IRQ action list */ | ||
| 188 | unsigned int status; /* IRQ status */ | ||
| 189 | |||
| 190 | unsigned int depth; /* nested irq disables */ | ||
| 191 | unsigned int wake_depth; /* nested wake enables */ | ||
| 192 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
| 193 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
| 194 | unsigned int irqs_unhandled; | ||
| 195 | raw_spinlock_t lock; | ||
| 196 | #ifdef CONFIG_SMP | ||
| 197 | cpumask_var_t affinity; | ||
| 198 | const struct cpumask *affinity_hint; | ||
| 199 | unsigned int node; | ||
| 200 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 201 | cpumask_var_t pending_mask; | ||
| 202 | #endif | ||
| 203 | #endif | ||
| 204 | atomic_t threads_active; | ||
| 205 | wait_queue_head_t wait_for_threads; | ||
| 206 | #ifdef CONFIG_PROC_FS | ||
| 207 | struct proc_dir_entry *dir; | ||
| 208 | #endif | ||
| 209 | const char *name; | ||
| 210 | } ____cacheline_internodealigned_in_smp; | ||
| 211 | 206 | ||
| 212 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | 207 | /* |
| 213 | struct irq_desc *desc, int node); | 208 | * Pick up the arch-dependent methods: |
| 214 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | 209 | */ |
| 210 | #include <asm/hw_irq.h> | ||
| 215 | 211 | ||
| 216 | #ifndef CONFIG_SPARSE_IRQ | 212 | #ifndef NR_IRQS_LEGACY |
| 217 | extern struct irq_desc irq_desc[NR_IRQS]; | 213 | # define NR_IRQS_LEGACY 0 |
| 218 | #endif | 214 | #endif |
| 219 | 215 | ||
| 220 | #ifdef CONFIG_NUMA_IRQ_DESC | 216 | #ifndef ARCH_IRQ_INIT_FLAGS |
| 221 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); | 217 | # define ARCH_IRQ_INIT_FLAGS 0 |
| 222 | #else | ||
| 223 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
| 224 | { | ||
| 225 | return desc; | ||
| 226 | } | ||
| 227 | #endif | 218 | #endif |
| 228 | 219 | ||
| 229 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | 220 | #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) |
| 230 | |||
| 231 | /* | ||
| 232 | * Pick up the arch-dependent methods: | ||
| 233 | */ | ||
| 234 | #include <asm/hw_irq.h> | ||
| 235 | 221 | ||
| 222 | struct irqaction; | ||
| 236 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 223 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
| 237 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 224 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
| 238 | 225 | ||
| 239 | #ifdef CONFIG_GENERIC_HARDIRQS | 226 | #ifdef CONFIG_GENERIC_HARDIRQS |
| 240 | 227 | ||
| 241 | #ifdef CONFIG_SMP | 228 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
| 242 | |||
| 243 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 244 | |||
| 245 | void move_native_irq(int irq); | 229 | void move_native_irq(int irq); |
| 246 | void move_masked_irq(int irq); | 230 | void move_masked_irq(int irq); |
| 247 | 231 | #else | |
| 248 | #else /* CONFIG_GENERIC_PENDING_IRQ */ | 232 | static inline void move_native_irq(int irq) { } |
| 249 | 233 | static inline void move_masked_irq(int irq) { } | |
| 250 | static inline void move_irq(int irq) | 234 | #endif |
| 251 | { | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline void move_native_irq(int irq) | ||
| 255 | { | ||
| 256 | } | ||
| 257 | |||
| 258 | static inline void move_masked_irq(int irq) | ||
| 259 | { | ||
| 260 | } | ||
| 261 | |||
| 262 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ | ||
| 263 | |||
| 264 | #else /* CONFIG_SMP */ | ||
| 265 | |||
| 266 | #define move_native_irq(x) | ||
| 267 | #define move_masked_irq(x) | ||
| 268 | |||
| 269 | #endif /* CONFIG_SMP */ | ||
| 270 | 235 | ||
| 271 | extern int no_irq_affinity; | 236 | extern int no_irq_affinity; |
| 272 | 237 | ||
| 273 | static inline int irq_balancing_disabled(unsigned int irq) | ||
| 274 | { | ||
| 275 | struct irq_desc *desc; | ||
| 276 | |||
| 277 | desc = irq_to_desc(irq); | ||
| 278 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* Handle irq action chains: */ | 238 | /* Handle irq action chains: */ |
| 282 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); | 239 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); |
| 283 | 240 | ||
| @@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | |||
| 293 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 250 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
| 294 | extern void handle_nested_irq(unsigned int irq); | 251 | extern void handle_nested_irq(unsigned int irq); |
| 295 | 252 | ||
| 296 | /* | ||
| 297 | * Monolithic do_IRQ implementation. | ||
| 298 | */ | ||
| 299 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 300 | extern unsigned int __do_IRQ(unsigned int irq); | ||
| 301 | #endif | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Architectures call this to let the generic IRQ layer | ||
| 305 | * handle an interrupt. If the descriptor is attached to an | ||
| 306 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
| 307 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
| 308 | */ | ||
| 309 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 310 | { | ||
| 311 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 312 | desc->handle_irq(irq, desc); | ||
| 313 | #else | ||
| 314 | if (likely(desc->handle_irq)) | ||
| 315 | desc->handle_irq(irq, desc); | ||
| 316 | else | ||
| 317 | __do_IRQ(irq); | ||
| 318 | #endif | ||
| 319 | } | ||
| 320 | |||
| 321 | static inline void generic_handle_irq(unsigned int irq) | ||
| 322 | { | ||
| 323 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
| 324 | } | ||
| 325 | |||
| 326 | /* Handling of unhandled and spurious interrupts: */ | 253 | /* Handling of unhandled and spurious interrupts: */ |
| 327 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 254 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
| 328 | irqreturn_t action_ret); | 255 | irqreturn_t action_ret); |
| 329 | 256 | ||
| 330 | /* Resending of interrupts :*/ | ||
| 331 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | ||
| 332 | 257 | ||
| 333 | /* Enable/disable irq debugging output: */ | 258 | /* Enable/disable irq debugging output: */ |
| 334 | extern int noirqdebug_setup(char *str); | 259 | extern int noirqdebug_setup(char *str); |
| @@ -351,16 +276,6 @@ extern void | |||
| 351 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 276 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
| 352 | const char *name); | 277 | const char *name); |
| 353 | 278 | ||
| 354 | /* caller has locked the irq_desc and both params are valid */ | ||
| 355 | static inline void __set_irq_handler_unlocked(int irq, | ||
| 356 | irq_flow_handler_t handler) | ||
| 357 | { | ||
| 358 | struct irq_desc *desc; | ||
| 359 | |||
| 360 | desc = irq_to_desc(irq); | ||
| 361 | desc->handle_irq = handler; | ||
| 362 | } | ||
| 363 | |||
| 364 | /* | 279 | /* |
| 365 | * Set a highlevel flow handler for a given IRQ: | 280 | * Set a highlevel flow handler for a given IRQ: |
| 366 | */ | 281 | */ |
| @@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq, | |||
| 384 | 299 | ||
| 385 | extern void set_irq_nested_thread(unsigned int irq, int nest); | 300 | extern void set_irq_nested_thread(unsigned int irq, int nest); |
| 386 | 301 | ||
| 387 | extern void set_irq_noprobe(unsigned int irq); | 302 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
| 388 | extern void set_irq_probe(unsigned int irq); | 303 | |
| 304 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | ||
| 305 | { | ||
| 306 | irq_modify_status(irq, 0, set); | ||
| 307 | } | ||
| 308 | |||
| 309 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | ||
| 310 | { | ||
| 311 | irq_modify_status(irq, clr, 0); | ||
| 312 | } | ||
| 313 | |||
| 314 | static inline void set_irq_noprobe(unsigned int irq) | ||
| 315 | { | ||
| 316 | irq_modify_status(irq, 0, IRQ_NOPROBE); | ||
| 317 | } | ||
| 318 | |||
| 319 | static inline void set_irq_probe(unsigned int irq) | ||
| 320 | { | ||
| 321 | irq_modify_status(irq, IRQ_NOPROBE, 0); | ||
| 322 | } | ||
| 389 | 323 | ||
| 390 | /* Handle dynamic irq creation and destruction */ | 324 | /* Handle dynamic irq creation and destruction */ |
| 391 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 325 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
| 392 | extern int create_irq(void); | 326 | extern int create_irq(void); |
| 393 | extern void destroy_irq(unsigned int irq); | 327 | extern void destroy_irq(unsigned int irq); |
| 394 | 328 | ||
| 395 | /* Test to see if a driver has successfully requested an irq */ | 329 | /* |
| 396 | static inline int irq_has_action(unsigned int irq) | 330 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
| 331 | * irq_free_desc instead. | ||
| 332 | */ | ||
| 333 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
| 334 | static inline void dynamic_irq_init(unsigned int irq) | ||
| 397 | { | 335 | { |
| 398 | struct irq_desc *desc = irq_to_desc(irq); | 336 | dynamic_irq_cleanup(irq); |
| 399 | return desc->action != NULL; | ||
| 400 | } | 337 | } |
| 401 | 338 | ||
| 402 | /* Dynamic irq helper functions */ | ||
| 403 | extern void dynamic_irq_init(unsigned int irq); | ||
| 404 | void dynamic_irq_init_keep_chip_data(unsigned int irq); | ||
| 405 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
| 406 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); | ||
| 407 | |||
| 408 | /* Set/get chip/data for an IRQ: */ | 339 | /* Set/get chip/data for an IRQ: */ |
| 409 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); | 340 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); |
| 410 | extern int set_irq_data(unsigned int irq, void *data); | 341 | extern int set_irq_data(unsigned int irq, void *data); |
| 411 | extern int set_irq_chip_data(unsigned int irq, void *data); | 342 | extern int set_irq_chip_data(unsigned int irq, void *data); |
| 412 | extern int set_irq_type(unsigned int irq, unsigned int type); | 343 | extern int set_irq_type(unsigned int irq, unsigned int type); |
| 413 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | 344 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); |
| 345 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | ||
| 414 | 346 | ||
| 415 | #define get_irq_chip(irq) (irq_to_desc(irq)->chip) | 347 | static inline struct irq_chip *get_irq_chip(unsigned int irq) |
| 416 | #define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) | ||
| 417 | #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) | ||
| 418 | #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) | ||
| 419 | |||
| 420 | #define get_irq_desc_chip(desc) ((desc)->chip) | ||
| 421 | #define get_irq_desc_chip_data(desc) ((desc)->chip_data) | ||
| 422 | #define get_irq_desc_data(desc) ((desc)->handler_data) | ||
| 423 | #define get_irq_desc_msi(desc) ((desc)->msi_desc) | ||
| 424 | |||
| 425 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
| 426 | |||
| 427 | #endif /* !CONFIG_S390 */ | ||
| 428 | |||
| 429 | #ifdef CONFIG_SMP | ||
| 430 | /** | ||
| 431 | * alloc_desc_masks - allocate cpumasks for irq_desc | ||
| 432 | * @desc: pointer to irq_desc struct | ||
| 433 | * @node: node which will be handling the cpumasks | ||
| 434 | * @boot: true if need bootmem | ||
| 435 | * | ||
| 436 | * Allocates affinity and pending_mask cpumask if required. | ||
| 437 | * Returns true if successful (or not required). | ||
| 438 | */ | ||
| 439 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | ||
| 440 | bool boot) | ||
| 441 | { | 348 | { |
| 442 | gfp_t gfp = GFP_ATOMIC; | 349 | struct irq_data *d = irq_get_irq_data(irq); |
| 443 | 350 | return d ? d->chip : NULL; | |
| 444 | if (boot) | 351 | } |
| 445 | gfp = GFP_NOWAIT; | ||
| 446 | |||
| 447 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
| 448 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
| 449 | return false; | ||
| 450 | 352 | ||
| 451 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 353 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
| 452 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 354 | { |
| 453 | free_cpumask_var(desc->affinity); | 355 | return d->chip; |
| 454 | return false; | ||
| 455 | } | ||
| 456 | #endif | ||
| 457 | #endif | ||
| 458 | return true; | ||
| 459 | } | 356 | } |
| 460 | 357 | ||
| 461 | static inline void init_desc_masks(struct irq_desc *desc) | 358 | static inline void *get_irq_chip_data(unsigned int irq) |
| 462 | { | 359 | { |
| 463 | cpumask_setall(desc->affinity); | 360 | struct irq_data *d = irq_get_irq_data(irq); |
| 464 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 361 | return d ? d->chip_data : NULL; |
| 465 | cpumask_clear(desc->pending_mask); | ||
| 466 | #endif | ||
| 467 | } | 362 | } |
| 468 | 363 | ||
| 469 | /** | 364 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
| 470 | * init_copy_desc_masks - copy cpumasks for irq_desc | 365 | { |
| 471 | * @old_desc: pointer to old irq_desc struct | 366 | return d->chip_data; |
| 472 | * @new_desc: pointer to new irq_desc struct | 367 | } |
| 473 | * | ||
| 474 | * Insures affinity and pending_masks are copied to new irq_desc. | ||
| 475 | * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the | ||
| 476 | * irq_desc struct so the copy is redundant. | ||
| 477 | */ | ||
| 478 | 368 | ||
| 479 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 369 | static inline void *get_irq_data(unsigned int irq) |
| 480 | struct irq_desc *new_desc) | ||
| 481 | { | 370 | { |
| 482 | #ifdef CONFIG_CPUMASK_OFFSTACK | 371 | struct irq_data *d = irq_get_irq_data(irq); |
| 483 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 372 | return d ? d->handler_data : NULL; |
| 373 | } | ||
| 484 | 374 | ||
| 485 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 375 | static inline void *irq_data_get_irq_data(struct irq_data *d) |
| 486 | cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); | 376 | { |
| 487 | #endif | 377 | return d->handler_data; |
| 488 | #endif | ||
| 489 | } | 378 | } |
| 490 | 379 | ||
| 491 | static inline void free_desc_masks(struct irq_desc *old_desc, | 380 | static inline struct msi_desc *get_irq_msi(unsigned int irq) |
| 492 | struct irq_desc *new_desc) | ||
| 493 | { | 381 | { |
| 494 | free_cpumask_var(old_desc->affinity); | 382 | struct irq_data *d = irq_get_irq_data(irq); |
| 383 | return d ? d->msi_desc : NULL; | ||
| 384 | } | ||
| 495 | 385 | ||
| 496 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 386 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) |
| 497 | free_cpumask_var(old_desc->pending_mask); | 387 | { |
| 498 | #endif | 388 | return d->msi_desc; |
| 499 | } | 389 | } |
| 500 | 390 | ||
| 501 | #else /* !CONFIG_SMP */ | 391 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
| 392 | void irq_free_descs(unsigned int irq, unsigned int cnt); | ||
| 393 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | ||
| 502 | 394 | ||
| 503 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | 395 | static inline int irq_alloc_desc(int node) |
| 504 | bool boot) | ||
| 505 | { | 396 | { |
| 506 | return true; | 397 | return irq_alloc_descs(-1, 0, 1, node); |
| 507 | } | 398 | } |
| 508 | 399 | ||
| 509 | static inline void init_desc_masks(struct irq_desc *desc) | 400 | static inline int irq_alloc_desc_at(unsigned int at, int node) |
| 510 | { | 401 | { |
| 402 | return irq_alloc_descs(at, at, 1, node); | ||
| 511 | } | 403 | } |
| 512 | 404 | ||
| 513 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 405 | static inline int irq_alloc_desc_from(unsigned int from, int node) |
| 514 | struct irq_desc *new_desc) | ||
| 515 | { | 406 | { |
| 407 | return irq_alloc_descs(-1, from, 1, node); | ||
| 516 | } | 408 | } |
| 517 | 409 | ||
| 518 | static inline void free_desc_masks(struct irq_desc *old_desc, | 410 | static inline void irq_free_desc(unsigned int irq) |
| 519 | struct irq_desc *new_desc) | ||
| 520 | { | 411 | { |
| 412 | irq_free_descs(irq, 1); | ||
| 521 | } | 413 | } |
| 522 | #endif /* CONFIG_SMP */ | 414 | |
| 415 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
| 416 | |||
| 417 | #endif /* !CONFIG_S390 */ | ||
| 523 | 418 | ||
| 524 | #endif /* _LINUX_IRQ_H */ | 419 | #endif /* _LINUX_IRQ_H */ |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 00000000000..979c68cc745 --- /dev/null +++ b/include/linux/irqdesc.h | |||
| @@ -0,0 +1,159 @@ | |||
| 1 | #ifndef _LINUX_IRQDESC_H | ||
| 2 | #define _LINUX_IRQDESC_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Core internal functions to deal with irq descriptors | ||
| 6 | * | ||
| 7 | * This include will move to kernel/irq once we cleaned up the tree. | ||
| 8 | * For now it's included from <linux/irq.h> | ||
| 9 | */ | ||
| 10 | |||
| 11 | struct proc_dir_entry; | ||
| 12 | struct timer_rand_state; | ||
| 13 | /** | ||
| 14 | * struct irq_desc - interrupt descriptor | ||
| 15 | * @irq_data: per irq and chip data passed down to chip functions | ||
| 16 | * @timer_rand_state: pointer to timer rand state struct | ||
| 17 | * @kstat_irqs: irq stats per cpu | ||
| 18 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
| 19 | * @action: the irq action chain | ||
| 20 | * @status: status information | ||
| 21 | * @depth: disable-depth, for nested irq_disable() calls | ||
| 22 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
| 23 | * @irq_count: stats field to detect stalled irqs | ||
| 24 | * @last_unhandled: aging timer for unhandled count | ||
| 25 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
| 26 | * @lock: locking for SMP | ||
| 27 | * @pending_mask: pending rebalanced interrupts | ||
| 28 | * @threads_active: number of irqaction threads currently running | ||
| 29 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
| 30 | * @dir: /proc/irq/ procfs entry | ||
| 31 | * @name: flow handler name for /proc/interrupts output | ||
| 32 | */ | ||
| 33 | struct irq_desc { | ||
| 34 | |||
| 35 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 36 | struct irq_data irq_data; | ||
| 37 | #else | ||
| 38 | /* | ||
| 39 | * This union will go away, once we fixed the direct access to | ||
| 40 | * irq_desc all over the place. The direct fields are a 1:1 | ||
| 41 | * overlay of irq_data. | ||
| 42 | */ | ||
| 43 | union { | ||
| 44 | struct irq_data irq_data; | ||
| 45 | struct { | ||
| 46 | unsigned int irq; | ||
| 47 | unsigned int node; | ||
| 48 | struct irq_chip *chip; | ||
| 49 | void *handler_data; | ||
| 50 | void *chip_data; | ||
| 51 | struct msi_desc *msi_desc; | ||
| 52 | #ifdef CONFIG_SMP | ||
| 53 | cpumask_var_t affinity; | ||
| 54 | #endif | ||
| 55 | }; | ||
| 56 | }; | ||
| 57 | #endif | ||
| 58 | |||
| 59 | struct timer_rand_state *timer_rand_state; | ||
| 60 | unsigned int *kstat_irqs; | ||
| 61 | irq_flow_handler_t handle_irq; | ||
| 62 | struct irqaction *action; /* IRQ action list */ | ||
| 63 | unsigned int status; /* IRQ status */ | ||
| 64 | |||
| 65 | unsigned int depth; /* nested irq disables */ | ||
| 66 | unsigned int wake_depth; /* nested wake enables */ | ||
| 67 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
| 68 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
| 69 | unsigned int irqs_unhandled; | ||
| 70 | raw_spinlock_t lock; | ||
| 71 | #ifdef CONFIG_SMP | ||
| 72 | const struct cpumask *affinity_hint; | ||
| 73 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 74 | cpumask_var_t pending_mask; | ||
| 75 | #endif | ||
| 76 | #endif | ||
| 77 | atomic_t threads_active; | ||
| 78 | wait_queue_head_t wait_for_threads; | ||
| 79 | #ifdef CONFIG_PROC_FS | ||
| 80 | struct proc_dir_entry *dir; | ||
| 81 | #endif | ||
| 82 | const char *name; | ||
| 83 | } ____cacheline_internodealigned_in_smp; | ||
| 84 | |||
| 85 | #ifndef CONFIG_SPARSE_IRQ | ||
| 86 | extern struct irq_desc irq_desc[NR_IRQS]; | ||
| 87 | #endif | ||
| 88 | |||
| 89 | /* Will be removed once the last users in power and sh are gone */ | ||
| 90 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | ||
| 91 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
| 92 | { | ||
| 93 | return desc; | ||
| 94 | } | ||
| 95 | |||
| 96 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 97 | |||
| 98 | #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) | ||
| 99 | #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) | ||
| 100 | #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) | ||
| 101 | #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) | ||
| 102 | |||
| 103 | /* | ||
| 104 | * Monolithic do_IRQ implementation. | ||
| 105 | */ | ||
| 106 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 107 | extern unsigned int __do_IRQ(unsigned int irq); | ||
| 108 | #endif | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Architectures call this to let the generic IRQ layer | ||
| 112 | * handle an interrupt. If the descriptor is attached to an | ||
| 113 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
| 114 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
| 115 | */ | ||
| 116 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 117 | { | ||
| 118 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 119 | desc->handle_irq(irq, desc); | ||
| 120 | #else | ||
| 121 | if (likely(desc->handle_irq)) | ||
| 122 | desc->handle_irq(irq, desc); | ||
| 123 | else | ||
| 124 | __do_IRQ(irq); | ||
| 125 | #endif | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline void generic_handle_irq(unsigned int irq) | ||
| 129 | { | ||
| 130 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Test to see if a driver has successfully requested an irq */ | ||
| 134 | static inline int irq_has_action(unsigned int irq) | ||
| 135 | { | ||
| 136 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 137 | return desc->action != NULL; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline int irq_balancing_disabled(unsigned int irq) | ||
| 141 | { | ||
| 142 | struct irq_desc *desc; | ||
| 143 | |||
| 144 | desc = irq_to_desc(irq); | ||
| 145 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
| 146 | } | ||
| 147 | |||
| 148 | /* caller has locked the irq_desc and both params are valid */ | ||
| 149 | static inline void __set_irq_handler_unlocked(int irq, | ||
| 150 | irq_flow_handler_t handler) | ||
| 151 | { | ||
| 152 | struct irq_desc *desc; | ||
| 153 | |||
| 154 | desc = irq_to_desc(irq); | ||
| 155 | desc->handle_irq = handler; | ||
| 156 | } | ||
| 157 | #endif | ||
| 158 | |||
| 159 | #endif | ||
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7bf89bc8cbc..05aa8c23483 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | extern int nr_irqs; | 26 | extern int nr_irqs; |
| 27 | extern struct irq_desc *irq_to_desc(unsigned int irq); | 27 | extern struct irq_desc *irq_to_desc(unsigned int irq); |
| 28 | unsigned int irq_get_next_irq(unsigned int offset); | ||
| 28 | 29 | ||
| 29 | # define for_each_irq_desc(irq, desc) \ | 30 | # define for_each_irq_desc(irq, desc) \ |
| 30 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ | 31 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ |
| @@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); | |||
| 47 | #define irq_node(irq) 0 | 48 | #define irq_node(irq) 0 |
| 48 | #endif | 49 | #endif |
| 49 | 50 | ||
| 51 | # define for_each_active_irq(irq) \ | ||
| 52 | for (irq = irq_get_next_irq(0); irq < nr_irqs; \ | ||
| 53 | irq = irq_get_next_irq(irq + 1)) | ||
| 54 | |||
| 50 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 55 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
| 51 | 56 | ||
| 52 | #define for_each_irq_nr(irq) \ | 57 | #define for_each_irq_nr(irq) \ |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2186a64ee4b..71c09b26c75 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -435,14 +435,6 @@ do { \ | |||
| 435 | 435 | ||
| 436 | #endif /* CONFIG_LOCKDEP */ | 436 | #endif /* CONFIG_LOCKDEP */ |
| 437 | 437 | ||
| 438 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 439 | extern void early_init_irq_lock_class(void); | ||
| 440 | #else | ||
| 441 | static inline void early_init_irq_lock_class(void) | ||
| 442 | { | ||
| 443 | } | ||
| 444 | #endif | ||
| 445 | |||
| 446 | #ifdef CONFIG_TRACE_IRQFLAGS | 438 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 447 | extern void early_boot_irqs_off(void); | 439 | extern void early_boot_irqs_off(void); |
| 448 | extern void early_boot_irqs_on(void); | 440 | extern void early_boot_irqs_on(void); |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 91b05c17185..05acced439a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
| @@ -10,12 +10,13 @@ struct msi_msg { | |||
| 10 | }; | 10 | }; |
| 11 | 11 | ||
| 12 | /* Helper functions */ | 12 | /* Helper functions */ |
| 13 | struct irq_desc; | 13 | struct irq_data; |
| 14 | extern void mask_msi_irq(unsigned int irq); | 14 | struct msi_desc; |
| 15 | extern void unmask_msi_irq(unsigned int irq); | 15 | extern void mask_msi_irq(struct irq_data *data); |
| 16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 16 | extern void unmask_msi_irq(struct irq_data *data); |
| 17 | extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 17 | extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
| 18 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 18 | extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
| 19 | extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
| 19 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); | 20 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 20 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); | 21 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 21 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | 22 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); |
diff --git a/init/Kconfig b/init/Kconfig index 7b920aafa98..36890f0c845 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -339,6 +339,8 @@ config AUDIT_TREE | |||
| 339 | depends on AUDITSYSCALL | 339 | depends on AUDITSYSCALL |
| 340 | select FSNOTIFY | 340 | select FSNOTIFY |
| 341 | 341 | ||
| 342 | source "kernel/irq/Kconfig" | ||
| 343 | |||
| 342 | menu "RCU Subsystem" | 344 | menu "RCU Subsystem" |
| 343 | 345 | ||
| 344 | choice | 346 | choice |
diff --git a/init/main.c b/init/main.c index 94ab488039a..9684c9670b4 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void) | |||
| 556 | 556 | ||
| 557 | local_irq_disable(); | 557 | local_irq_disable(); |
| 558 | early_boot_irqs_off(); | 558 | early_boot_irqs_off(); |
| 559 | early_init_irq_lock_class(); | ||
| 560 | 559 | ||
| 561 | /* | 560 | /* |
| 562 | * Interrupts are still disabled. Do necessary setups, then | 561 | * Interrupts are still disabled. Do necessary setups, then |
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig new file mode 100644 index 00000000000..31d766bf5d2 --- /dev/null +++ b/kernel/irq/Kconfig | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | config HAVE_GENERIC_HARDIRQS | ||
| 2 | def_bool n | ||
| 3 | |||
| 4 | if HAVE_GENERIC_HARDIRQS | ||
| 5 | menu "IRQ subsystem" | ||
| 6 | # | ||
| 7 | # Interrupt subsystem related configuration options | ||
| 8 | # | ||
| 9 | config GENERIC_HARDIRQS | ||
| 10 | def_bool y | ||
| 11 | |||
| 12 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
| 13 | def_bool y | ||
| 14 | |||
| 15 | # Select this to disable the deprecated stuff | ||
| 16 | config GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 17 | def_bool n | ||
| 18 | |||
| 19 | # Options selectable by the architecture code | ||
| 20 | config HAVE_SPARSE_IRQ | ||
| 21 | def_bool n | ||
| 22 | |||
| 23 | config GENERIC_IRQ_PROBE | ||
| 24 | def_bool n | ||
| 25 | |||
| 26 | config GENERIC_PENDING_IRQ | ||
| 27 | def_bool n | ||
| 28 | |||
| 29 | config AUTO_IRQ_AFFINITY | ||
| 30 | def_bool n | ||
| 31 | |||
| 32 | config IRQ_PER_CPU | ||
| 33 | def_bool n | ||
| 34 | |||
| 35 | config HARDIRQS_SW_RESEND | ||
| 36 | def_bool n | ||
| 37 | |||
| 38 | config SPARSE_IRQ | ||
| 39 | bool "Support sparse irq numbering" | ||
| 40 | depends on HAVE_SPARSE_IRQ | ||
| 41 | ---help--- | ||
| 42 | |||
| 43 | Sparse irq numbering is useful for distro kernels that want | ||
| 44 | to define a high CONFIG_NR_CPUS value but still want to have | ||
| 45 | low kernel memory footprint on smaller machines. | ||
| 46 | |||
| 47 | ( Sparse irqs can also be beneficial on NUMA boxes, as they spread | ||
| 48 | out the interrupt descriptors in a more NUMA-friendly way. ) | ||
| 49 | |||
| 50 | If you don't know what to do here, say N. | ||
| 51 | |||
| 52 | endmenu | ||
| 53 | endif | ||
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 7d047808419..54329cd7b3e 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | 1 | ||
| 2 | obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o | 2 | obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o |
| 3 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o | 3 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o |
| 4 | obj-$(CONFIG_PROC_FS) += proc.o | 4 | obj-$(CONFIG_PROC_FS) += proc.o |
| 5 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o | 5 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o |
| 6 | obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o | ||
| 7 | obj-$(CONFIG_PM_SLEEP) += pm.o | 6 | obj-$(CONFIG_PM_SLEEP) += pm.o |
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 2295a31ef11..505798f86c3 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c | |||
| @@ -57,9 +57,10 @@ unsigned long probe_irq_on(void) | |||
| 57 | * Some chips need to know about probing in | 57 | * Some chips need to know about probing in |
| 58 | * progress: | 58 | * progress: |
| 59 | */ | 59 | */ |
| 60 | if (desc->chip->set_type) | 60 | if (desc->irq_data.chip->irq_set_type) |
| 61 | desc->chip->set_type(i, IRQ_TYPE_PROBE); | 61 | desc->irq_data.chip->irq_set_type(&desc->irq_data, |
| 62 | desc->chip->startup(i); | 62 | IRQ_TYPE_PROBE); |
| 63 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
| 63 | } | 64 | } |
| 64 | raw_spin_unlock_irq(&desc->lock); | 65 | raw_spin_unlock_irq(&desc->lock); |
| 65 | } | 66 | } |
| @@ -76,7 +77,7 @@ unsigned long probe_irq_on(void) | |||
| 76 | raw_spin_lock_irq(&desc->lock); | 77 | raw_spin_lock_irq(&desc->lock); |
| 77 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 78 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { |
| 78 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; | 79 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; |
| 79 | if (desc->chip->startup(i)) | 80 | if (desc->irq_data.chip->irq_startup(&desc->irq_data)) |
| 80 | desc->status |= IRQ_PENDING; | 81 | desc->status |= IRQ_PENDING; |
| 81 | } | 82 | } |
| 82 | raw_spin_unlock_irq(&desc->lock); | 83 | raw_spin_unlock_irq(&desc->lock); |
| @@ -98,7 +99,7 @@ unsigned long probe_irq_on(void) | |||
| 98 | /* It triggered already - consider it spurious. */ | 99 | /* It triggered already - consider it spurious. */ |
| 99 | if (!(status & IRQ_WAITING)) { | 100 | if (!(status & IRQ_WAITING)) { |
| 100 | desc->status = status & ~IRQ_AUTODETECT; | 101 | desc->status = status & ~IRQ_AUTODETECT; |
| 101 | desc->chip->shutdown(i); | 102 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); |
| 102 | } else | 103 | } else |
| 103 | if (i < 32) | 104 | if (i < 32) |
| 104 | mask |= 1 << i; | 105 | mask |= 1 << i; |
| @@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val) | |||
| 137 | mask |= 1 << i; | 138 | mask |= 1 << i; |
| 138 | 139 | ||
| 139 | desc->status = status & ~IRQ_AUTODETECT; | 140 | desc->status = status & ~IRQ_AUTODETECT; |
| 140 | desc->chip->shutdown(i); | 141 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); |
| 141 | } | 142 | } |
| 142 | raw_spin_unlock_irq(&desc->lock); | 143 | raw_spin_unlock_irq(&desc->lock); |
| 143 | } | 144 | } |
| @@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val) | |||
| 181 | nr_of_irqs++; | 182 | nr_of_irqs++; |
| 182 | } | 183 | } |
| 183 | desc->status = status & ~IRQ_AUTODETECT; | 184 | desc->status = status & ~IRQ_AUTODETECT; |
| 184 | desc->chip->shutdown(i); | 185 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); |
| 185 | } | 186 | } |
| 186 | raw_spin_unlock_irq(&desc->lock); | 187 | raw_spin_unlock_irq(&desc->lock); |
| 187 | } | 188 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b7091d5ca2f..baa5c4acad8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -18,108 +18,6 @@ | |||
| 18 | 18 | ||
| 19 | #include "internals.h" | 19 | #include "internals.h" |
| 20 | 20 | ||
| 21 | static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) | ||
| 22 | { | ||
| 23 | struct irq_desc *desc; | ||
| 24 | unsigned long flags; | ||
| 25 | |||
| 26 | desc = irq_to_desc(irq); | ||
| 27 | if (!desc) { | ||
| 28 | WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); | ||
| 29 | return; | ||
| 30 | } | ||
| 31 | |||
| 32 | /* Ensure we don't have left over values from a previous use of this irq */ | ||
| 33 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 34 | desc->status = IRQ_DISABLED; | ||
| 35 | desc->chip = &no_irq_chip; | ||
| 36 | desc->handle_irq = handle_bad_irq; | ||
| 37 | desc->depth = 1; | ||
| 38 | desc->msi_desc = NULL; | ||
| 39 | desc->handler_data = NULL; | ||
| 40 | if (!keep_chip_data) | ||
| 41 | desc->chip_data = NULL; | ||
| 42 | desc->action = NULL; | ||
| 43 | desc->irq_count = 0; | ||
| 44 | desc->irqs_unhandled = 0; | ||
| 45 | #ifdef CONFIG_SMP | ||
| 46 | cpumask_setall(desc->affinity); | ||
| 47 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 48 | cpumask_clear(desc->pending_mask); | ||
| 49 | #endif | ||
| 50 | #endif | ||
| 51 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 52 | } | ||
| 53 | |||
| 54 | /** | ||
| 55 | * dynamic_irq_init - initialize a dynamically allocated irq | ||
| 56 | * @irq: irq number to initialize | ||
| 57 | */ | ||
| 58 | void dynamic_irq_init(unsigned int irq) | ||
| 59 | { | ||
| 60 | dynamic_irq_init_x(irq, false); | ||
| 61 | } | ||
| 62 | |||
| 63 | /** | ||
| 64 | * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq | ||
| 65 | * @irq: irq number to initialize | ||
| 66 | * | ||
| 67 | * does not set irq_to_desc(irq)->chip_data to NULL | ||
| 68 | */ | ||
| 69 | void dynamic_irq_init_keep_chip_data(unsigned int irq) | ||
| 70 | { | ||
| 71 | dynamic_irq_init_x(irq, true); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) | ||
| 75 | { | ||
| 76 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 77 | unsigned long flags; | ||
| 78 | |||
| 79 | if (!desc) { | ||
| 80 | WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); | ||
| 81 | return; | ||
| 82 | } | ||
| 83 | |||
| 84 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 85 | if (desc->action) { | ||
| 86 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 87 | WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", | ||
| 88 | irq); | ||
| 89 | return; | ||
| 90 | } | ||
| 91 | desc->msi_desc = NULL; | ||
| 92 | desc->handler_data = NULL; | ||
| 93 | if (!keep_chip_data) | ||
| 94 | desc->chip_data = NULL; | ||
| 95 | desc->handle_irq = handle_bad_irq; | ||
| 96 | desc->chip = &no_irq_chip; | ||
| 97 | desc->name = NULL; | ||
| 98 | clear_kstat_irqs(desc); | ||
| 99 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 100 | } | ||
| 101 | |||
| 102 | /** | ||
| 103 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | ||
| 104 | * @irq: irq number to initialize | ||
| 105 | */ | ||
| 106 | void dynamic_irq_cleanup(unsigned int irq) | ||
| 107 | { | ||
| 108 | dynamic_irq_cleanup_x(irq, false); | ||
| 109 | } | ||
| 110 | |||
| 111 | /** | ||
| 112 | * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq | ||
| 113 | * @irq: irq number to initialize | ||
| 114 | * | ||
| 115 | * does not set irq_to_desc(irq)->chip_data to NULL | ||
| 116 | */ | ||
| 117 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) | ||
| 118 | { | ||
| 119 | dynamic_irq_cleanup_x(irq, true); | ||
| 120 | } | ||
| 121 | |||
| 122 | |||
| 123 | /** | 21 | /** |
| 124 | * set_irq_chip - set the irq chip for an irq | 22 | * set_irq_chip - set the irq chip for an irq |
| 125 | * @irq: irq number | 23 | * @irq: irq number |
| @@ -140,7 +38,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) | |||
| 140 | 38 | ||
| 141 | raw_spin_lock_irqsave(&desc->lock, flags); | 39 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 142 | irq_chip_set_defaults(chip); | 40 | irq_chip_set_defaults(chip); |
| 143 | desc->chip = chip; | 41 | desc->irq_data.chip = chip; |
| 144 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 42 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 145 | 43 | ||
| 146 | return 0; | 44 | return 0; |
| @@ -193,7 +91,7 @@ int set_irq_data(unsigned int irq, void *data) | |||
| 193 | } | 91 | } |
| 194 | 92 | ||
| 195 | raw_spin_lock_irqsave(&desc->lock, flags); | 93 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 196 | desc->handler_data = data; | 94 | desc->irq_data.handler_data = data; |
| 197 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 95 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 198 | return 0; | 96 | return 0; |
| 199 | } | 97 | } |
| @@ -218,7 +116,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) | |||
| 218 | } | 116 | } |
| 219 | 117 | ||
| 220 | raw_spin_lock_irqsave(&desc->lock, flags); | 118 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 221 | desc->msi_desc = entry; | 119 | desc->irq_data.msi_desc = entry; |
| 222 | if (entry) | 120 | if (entry) |
| 223 | entry->irq = irq; | 121 | entry->irq = irq; |
| 224 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 122 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| @@ -243,19 +141,27 @@ int set_irq_chip_data(unsigned int irq, void *data) | |||
| 243 | return -EINVAL; | 141 | return -EINVAL; |
| 244 | } | 142 | } |
| 245 | 143 | ||
| 246 | if (!desc->chip) { | 144 | if (!desc->irq_data.chip) { |
| 247 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); | 145 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); |
| 248 | return -EINVAL; | 146 | return -EINVAL; |
| 249 | } | 147 | } |
| 250 | 148 | ||
| 251 | raw_spin_lock_irqsave(&desc->lock, flags); | 149 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 252 | desc->chip_data = data; | 150 | desc->irq_data.chip_data = data; |
| 253 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 151 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 254 | 152 | ||
| 255 | return 0; | 153 | return 0; |
| 256 | } | 154 | } |
| 257 | EXPORT_SYMBOL(set_irq_chip_data); | 155 | EXPORT_SYMBOL(set_irq_chip_data); |
| 258 | 156 | ||
| 157 | struct irq_data *irq_get_irq_data(unsigned int irq) | ||
| 158 | { | ||
| 159 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 160 | |||
| 161 | return desc ? &desc->irq_data : NULL; | ||
| 162 | } | ||
| 163 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | ||
| 164 | |||
| 259 | /** | 165 | /** |
| 260 | * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq | 166 | * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq |
| 261 | * | 167 | * |
| @@ -287,93 +193,216 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread); | |||
| 287 | /* | 193 | /* |
| 288 | * default enable function | 194 | * default enable function |
| 289 | */ | 195 | */ |
| 290 | static void default_enable(unsigned int irq) | 196 | static void default_enable(struct irq_data *data) |
| 291 | { | 197 | { |
| 292 | struct irq_desc *desc = irq_to_desc(irq); | 198 | struct irq_desc *desc = irq_data_to_desc(data); |
| 293 | 199 | ||
| 294 | desc->chip->unmask(irq); | 200 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 295 | desc->status &= ~IRQ_MASKED; | 201 | desc->status &= ~IRQ_MASKED; |
| 296 | } | 202 | } |
| 297 | 203 | ||
| 298 | /* | 204 | /* |
| 299 | * default disable function | 205 | * default disable function |
| 300 | */ | 206 | */ |
| 301 | static void default_disable(unsigned int irq) | 207 | static void default_disable(struct irq_data *data) |
| 302 | { | 208 | { |
| 303 | } | 209 | } |
| 304 | 210 | ||
| 305 | /* | 211 | /* |
| 306 | * default startup function | 212 | * default startup function |
| 307 | */ | 213 | */ |
| 308 | static unsigned int default_startup(unsigned int irq) | 214 | static unsigned int default_startup(struct irq_data *data) |
| 309 | { | 215 | { |
| 310 | struct irq_desc *desc = irq_to_desc(irq); | 216 | struct irq_desc *desc = irq_data_to_desc(data); |
| 311 | 217 | ||
| 312 | desc->chip->enable(irq); | 218 | desc->irq_data.chip->irq_enable(data); |
| 313 | return 0; | 219 | return 0; |
| 314 | } | 220 | } |
| 315 | 221 | ||
| 316 | /* | 222 | /* |
| 317 | * default shutdown function | 223 | * default shutdown function |
| 318 | */ | 224 | */ |
| 319 | static void default_shutdown(unsigned int irq) | 225 | static void default_shutdown(struct irq_data *data) |
| 320 | { | 226 | { |
| 321 | struct irq_desc *desc = irq_to_desc(irq); | 227 | struct irq_desc *desc = irq_data_to_desc(data); |
| 322 | 228 | ||
| 323 | desc->chip->mask(irq); | 229 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
| 324 | desc->status |= IRQ_MASKED; | 230 | desc->status |= IRQ_MASKED; |
| 325 | } | 231 | } |
| 326 | 232 | ||
| 233 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 234 | /* Temporary migration helpers */ | ||
| 235 | static void compat_irq_mask(struct irq_data *data) | ||
| 236 | { | ||
| 237 | data->chip->mask(data->irq); | ||
| 238 | } | ||
| 239 | |||
| 240 | static void compat_irq_unmask(struct irq_data *data) | ||
| 241 | { | ||
| 242 | data->chip->unmask(data->irq); | ||
| 243 | } | ||
| 244 | |||
| 245 | static void compat_irq_ack(struct irq_data *data) | ||
| 246 | { | ||
| 247 | data->chip->ack(data->irq); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void compat_irq_mask_ack(struct irq_data *data) | ||
| 251 | { | ||
| 252 | data->chip->mask_ack(data->irq); | ||
| 253 | } | ||
| 254 | |||
| 255 | static void compat_irq_eoi(struct irq_data *data) | ||
| 256 | { | ||
| 257 | data->chip->eoi(data->irq); | ||
| 258 | } | ||
| 259 | |||
| 260 | static void compat_irq_enable(struct irq_data *data) | ||
| 261 | { | ||
| 262 | data->chip->enable(data->irq); | ||
| 263 | } | ||
| 264 | |||
| 265 | static void compat_irq_disable(struct irq_data *data) | ||
| 266 | { | ||
| 267 | data->chip->disable(data->irq); | ||
| 268 | } | ||
| 269 | |||
| 270 | static void compat_irq_shutdown(struct irq_data *data) | ||
| 271 | { | ||
| 272 | data->chip->shutdown(data->irq); | ||
| 273 | } | ||
| 274 | |||
| 275 | static unsigned int compat_irq_startup(struct irq_data *data) | ||
| 276 | { | ||
| 277 | return data->chip->startup(data->irq); | ||
| 278 | } | ||
| 279 | |||
| 280 | static int compat_irq_set_affinity(struct irq_data *data, | ||
| 281 | const struct cpumask *dest, bool force) | ||
| 282 | { | ||
| 283 | return data->chip->set_affinity(data->irq, dest); | ||
| 284 | } | ||
| 285 | |||
| 286 | static int compat_irq_set_type(struct irq_data *data, unsigned int type) | ||
| 287 | { | ||
| 288 | return data->chip->set_type(data->irq, type); | ||
| 289 | } | ||
| 290 | |||
| 291 | static int compat_irq_set_wake(struct irq_data *data, unsigned int on) | ||
| 292 | { | ||
| 293 | return data->chip->set_wake(data->irq, on); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int compat_irq_retrigger(struct irq_data *data) | ||
| 297 | { | ||
| 298 | return data->chip->retrigger(data->irq); | ||
| 299 | } | ||
| 300 | |||
| 301 | static void compat_bus_lock(struct irq_data *data) | ||
| 302 | { | ||
| 303 | data->chip->bus_lock(data->irq); | ||
| 304 | } | ||
| 305 | |||
| 306 | static void compat_bus_sync_unlock(struct irq_data *data) | ||
| 307 | { | ||
| 308 | data->chip->bus_sync_unlock(data->irq); | ||
| 309 | } | ||
| 310 | #endif | ||
| 311 | |||
| 327 | /* | 312 | /* |
| 328 | * Fixup enable/disable function pointers | 313 | * Fixup enable/disable function pointers |
| 329 | */ | 314 | */ |
| 330 | void irq_chip_set_defaults(struct irq_chip *chip) | 315 | void irq_chip_set_defaults(struct irq_chip *chip) |
| 331 | { | 316 | { |
| 332 | if (!chip->enable) | 317 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
| 333 | chip->enable = default_enable; | ||
| 334 | if (!chip->disable) | ||
| 335 | chip->disable = default_disable; | ||
| 336 | if (!chip->startup) | ||
| 337 | chip->startup = default_startup; | ||
| 338 | /* | 318 | /* |
| 339 | * We use chip->disable, when the user provided its own. When | 319 | * Compat fixup functions need to be before we set the |
| 340 | * we have default_disable set for chip->disable, then we need | 320 | * defaults for enable/disable/startup/shutdown |
| 321 | */ | ||
| 322 | if (chip->enable) | ||
| 323 | chip->irq_enable = compat_irq_enable; | ||
| 324 | if (chip->disable) | ||
| 325 | chip->irq_disable = compat_irq_disable; | ||
| 326 | if (chip->shutdown) | ||
| 327 | chip->irq_shutdown = compat_irq_shutdown; | ||
| 328 | if (chip->startup) | ||
| 329 | chip->irq_startup = compat_irq_startup; | ||
| 330 | #endif | ||
| 331 | /* | ||
| 332 | * The real defaults | ||
| 333 | */ | ||
| 334 | if (!chip->irq_enable) | ||
| 335 | chip->irq_enable = default_enable; | ||
| 336 | if (!chip->irq_disable) | ||
| 337 | chip->irq_disable = default_disable; | ||
| 338 | if (!chip->irq_startup) | ||
| 339 | chip->irq_startup = default_startup; | ||
| 340 | /* | ||
| 341 | * We use chip->irq_disable, when the user provided its own. When | ||
| 342 | * we have default_disable set for chip->irq_disable, then we need | ||
| 341 | * to use default_shutdown, otherwise the irq line is not | 343 | * to use default_shutdown, otherwise the irq line is not |
| 342 | * disabled on free_irq(): | 344 | * disabled on free_irq(): |
| 343 | */ | 345 | */ |
| 344 | if (!chip->shutdown) | 346 | if (!chip->irq_shutdown) |
| 345 | chip->shutdown = chip->disable != default_disable ? | 347 | chip->irq_shutdown = chip->irq_disable != default_disable ? |
| 346 | chip->disable : default_shutdown; | 348 | chip->irq_disable : default_shutdown; |
| 347 | if (!chip->name) | 349 | |
| 348 | chip->name = chip->typename; | 350 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
| 349 | if (!chip->end) | 351 | if (!chip->end) |
| 350 | chip->end = dummy_irq_chip.end; | 352 | chip->end = dummy_irq_chip.end; |
| 353 | |||
| 354 | /* | ||
| 355 | * Now fix up the remaining compat handlers | ||
| 356 | */ | ||
| 357 | if (chip->bus_lock) | ||
| 358 | chip->irq_bus_lock = compat_bus_lock; | ||
| 359 | if (chip->bus_sync_unlock) | ||
| 360 | chip->irq_bus_sync_unlock = compat_bus_sync_unlock; | ||
| 361 | if (chip->mask) | ||
| 362 | chip->irq_mask = compat_irq_mask; | ||
| 363 | if (chip->unmask) | ||
| 364 | chip->irq_unmask = compat_irq_unmask; | ||
| 365 | if (chip->ack) | ||
| 366 | chip->irq_ack = compat_irq_ack; | ||
| 367 | if (chip->mask_ack) | ||
| 368 | chip->irq_mask_ack = compat_irq_mask_ack; | ||
| 369 | if (chip->eoi) | ||
| 370 | chip->irq_eoi = compat_irq_eoi; | ||
| 371 | if (chip->set_affinity) | ||
| 372 | chip->irq_set_affinity = compat_irq_set_affinity; | ||
| 373 | if (chip->set_type) | ||
| 374 | chip->irq_set_type = compat_irq_set_type; | ||
| 375 | if (chip->set_wake) | ||
| 376 | chip->irq_set_wake = compat_irq_set_wake; | ||
| 377 | if (chip->retrigger) | ||
| 378 | chip->irq_retrigger = compat_irq_retrigger; | ||
| 379 | #endif | ||
| 351 | } | 380 | } |
| 352 | 381 | ||
| 353 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) | 382 | static inline void mask_ack_irq(struct irq_desc *desc) |
| 354 | { | 383 | { |
| 355 | if (desc->chip->mask_ack) | 384 | if (desc->irq_data.chip->irq_mask_ack) |
| 356 | desc->chip->mask_ack(irq); | 385 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); |
| 357 | else { | 386 | else { |
| 358 | desc->chip->mask(irq); | 387 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
| 359 | if (desc->chip->ack) | 388 | if (desc->irq_data.chip->irq_ack) |
| 360 | desc->chip->ack(irq); | 389 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 361 | } | 390 | } |
| 362 | desc->status |= IRQ_MASKED; | 391 | desc->status |= IRQ_MASKED; |
| 363 | } | 392 | } |
| 364 | 393 | ||
| 365 | static inline void mask_irq(struct irq_desc *desc, int irq) | 394 | static inline void mask_irq(struct irq_desc *desc) |
| 366 | { | 395 | { |
| 367 | if (desc->chip->mask) { | 396 | if (desc->irq_data.chip->irq_mask) { |
| 368 | desc->chip->mask(irq); | 397 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
| 369 | desc->status |= IRQ_MASKED; | 398 | desc->status |= IRQ_MASKED; |
| 370 | } | 399 | } |
| 371 | } | 400 | } |
| 372 | 401 | ||
| 373 | static inline void unmask_irq(struct irq_desc *desc, int irq) | 402 | static inline void unmask_irq(struct irq_desc *desc) |
| 374 | { | 403 | { |
| 375 | if (desc->chip->unmask) { | 404 | if (desc->irq_data.chip->irq_unmask) { |
| 376 | desc->chip->unmask(irq); | 405 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 377 | desc->status &= ~IRQ_MASKED; | 406 | desc->status &= ~IRQ_MASKED; |
| 378 | } | 407 | } |
| 379 | } | 408 | } |
| @@ -476,7 +505,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
| 476 | irqreturn_t action_ret; | 505 | irqreturn_t action_ret; |
| 477 | 506 | ||
| 478 | raw_spin_lock(&desc->lock); | 507 | raw_spin_lock(&desc->lock); |
| 479 | mask_ack_irq(desc, irq); | 508 | mask_ack_irq(desc); |
| 480 | 509 | ||
| 481 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 510 | if (unlikely(desc->status & IRQ_INPROGRESS)) |
| 482 | goto out_unlock; | 511 | goto out_unlock; |
| @@ -502,7 +531,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
| 502 | desc->status &= ~IRQ_INPROGRESS; | 531 | desc->status &= ~IRQ_INPROGRESS; |
| 503 | 532 | ||
| 504 | if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) | 533 | if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) |
| 505 | unmask_irq(desc, irq); | 534 | unmask_irq(desc); |
| 506 | out_unlock: | 535 | out_unlock: |
| 507 | raw_spin_unlock(&desc->lock); | 536 | raw_spin_unlock(&desc->lock); |
| 508 | } | 537 | } |
| @@ -539,7 +568,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 539 | action = desc->action; | 568 | action = desc->action; |
| 540 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | 569 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { |
| 541 | desc->status |= IRQ_PENDING; | 570 | desc->status |= IRQ_PENDING; |
| 542 | mask_irq(desc, irq); | 571 | mask_irq(desc); |
| 543 | goto out; | 572 | goto out; |
| 544 | } | 573 | } |
| 545 | 574 | ||
| @@ -554,7 +583,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 554 | raw_spin_lock(&desc->lock); | 583 | raw_spin_lock(&desc->lock); |
| 555 | desc->status &= ~IRQ_INPROGRESS; | 584 | desc->status &= ~IRQ_INPROGRESS; |
| 556 | out: | 585 | out: |
| 557 | desc->chip->eoi(irq); | 586 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
| 558 | 587 | ||
| 559 | raw_spin_unlock(&desc->lock); | 588 | raw_spin_unlock(&desc->lock); |
| 560 | } | 589 | } |
| @@ -590,14 +619,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 590 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | 619 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || |
| 591 | !desc->action)) { | 620 | !desc->action)) { |
| 592 | desc->status |= (IRQ_PENDING | IRQ_MASKED); | 621 | desc->status |= (IRQ_PENDING | IRQ_MASKED); |
| 593 | mask_ack_irq(desc, irq); | 622 | mask_ack_irq(desc); |
| 594 | goto out_unlock; | 623 | goto out_unlock; |
| 595 | } | 624 | } |
| 596 | kstat_incr_irqs_this_cpu(irq, desc); | 625 | kstat_incr_irqs_this_cpu(irq, desc); |
| 597 | 626 | ||
| 598 | /* Start handling the irq */ | 627 | /* Start handling the irq */ |
| 599 | if (desc->chip->ack) | 628 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 600 | desc->chip->ack(irq); | ||
| 601 | 629 | ||
| 602 | /* Mark the IRQ currently in progress.*/ | 630 | /* Mark the IRQ currently in progress.*/ |
| 603 | desc->status |= IRQ_INPROGRESS; | 631 | desc->status |= IRQ_INPROGRESS; |
| @@ -607,7 +635,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 607 | irqreturn_t action_ret; | 635 | irqreturn_t action_ret; |
| 608 | 636 | ||
| 609 | if (unlikely(!action)) { | 637 | if (unlikely(!action)) { |
| 610 | mask_irq(desc, irq); | 638 | mask_irq(desc); |
| 611 | goto out_unlock; | 639 | goto out_unlock; |
| 612 | } | 640 | } |
| 613 | 641 | ||
| @@ -619,7 +647,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 619 | if (unlikely((desc->status & | 647 | if (unlikely((desc->status & |
| 620 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == | 648 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == |
| 621 | (IRQ_PENDING | IRQ_MASKED))) { | 649 | (IRQ_PENDING | IRQ_MASKED))) { |
| 622 | unmask_irq(desc, irq); | 650 | unmask_irq(desc); |
| 623 | } | 651 | } |
| 624 | 652 | ||
| 625 | desc->status &= ~IRQ_PENDING; | 653 | desc->status &= ~IRQ_PENDING; |
| @@ -650,15 +678,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |||
| 650 | 678 | ||
| 651 | kstat_incr_irqs_this_cpu(irq, desc); | 679 | kstat_incr_irqs_this_cpu(irq, desc); |
| 652 | 680 | ||
| 653 | if (desc->chip->ack) | 681 | if (desc->irq_data.chip->irq_ack) |
| 654 | desc->chip->ack(irq); | 682 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 655 | 683 | ||
| 656 | action_ret = handle_IRQ_event(irq, desc->action); | 684 | action_ret = handle_IRQ_event(irq, desc->action); |
| 657 | if (!noirqdebug) | 685 | if (!noirqdebug) |
| 658 | note_interrupt(irq, desc, action_ret); | 686 | note_interrupt(irq, desc, action_ret); |
| 659 | 687 | ||
| 660 | if (desc->chip->eoi) | 688 | if (desc->irq_data.chip->irq_eoi) |
| 661 | desc->chip->eoi(irq); | 689 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
| 662 | } | 690 | } |
| 663 | 691 | ||
| 664 | void | 692 | void |
| @@ -676,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 676 | 704 | ||
| 677 | if (!handle) | 705 | if (!handle) |
| 678 | handle = handle_bad_irq; | 706 | handle = handle_bad_irq; |
| 679 | else if (desc->chip == &no_irq_chip) { | 707 | else if (desc->irq_data.chip == &no_irq_chip) { |
| 680 | printk(KERN_WARNING "Trying to install %sinterrupt handler " | 708 | printk(KERN_WARNING "Trying to install %sinterrupt handler " |
| 681 | "for IRQ%d\n", is_chained ? "chained " : "", irq); | 709 | "for IRQ%d\n", is_chained ? "chained " : "", irq); |
| 682 | /* | 710 | /* |
| @@ -686,16 +714,16 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 686 | * prevent us to setup the interrupt at all. Switch it to | 714 | * prevent us to setup the interrupt at all. Switch it to |
| 687 | * dummy_irq_chip for easy transition. | 715 | * dummy_irq_chip for easy transition. |
| 688 | */ | 716 | */ |
| 689 | desc->chip = &dummy_irq_chip; | 717 | desc->irq_data.chip = &dummy_irq_chip; |
| 690 | } | 718 | } |
| 691 | 719 | ||
| 692 | chip_bus_lock(irq, desc); | 720 | chip_bus_lock(desc); |
| 693 | raw_spin_lock_irqsave(&desc->lock, flags); | 721 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 694 | 722 | ||
| 695 | /* Uninstall? */ | 723 | /* Uninstall? */ |
| 696 | if (handle == handle_bad_irq) { | 724 | if (handle == handle_bad_irq) { |
| 697 | if (desc->chip != &no_irq_chip) | 725 | if (desc->irq_data.chip != &no_irq_chip) |
| 698 | mask_ack_irq(desc, irq); | 726 | mask_ack_irq(desc); |
| 699 | desc->status |= IRQ_DISABLED; | 727 | desc->status |= IRQ_DISABLED; |
| 700 | desc->depth = 1; | 728 | desc->depth = 1; |
| 701 | } | 729 | } |
| @@ -706,10 +734,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 706 | desc->status &= ~IRQ_DISABLED; | 734 | desc->status &= ~IRQ_DISABLED; |
| 707 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; | 735 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; |
| 708 | desc->depth = 0; | 736 | desc->depth = 0; |
| 709 | desc->chip->startup(irq); | 737 | desc->irq_data.chip->irq_startup(&desc->irq_data); |
| 710 | } | 738 | } |
| 711 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 739 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 712 | chip_bus_sync_unlock(irq, desc); | 740 | chip_bus_sync_unlock(desc); |
| 713 | } | 741 | } |
| 714 | EXPORT_SYMBOL_GPL(__set_irq_handler); | 742 | EXPORT_SYMBOL_GPL(__set_irq_handler); |
| 715 | 743 | ||
| @@ -729,32 +757,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | |||
| 729 | __set_irq_handler(irq, handle, 0, name); | 757 | __set_irq_handler(irq, handle, 0, name); |
| 730 | } | 758 | } |
| 731 | 759 | ||
| 732 | void set_irq_noprobe(unsigned int irq) | 760 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
| 733 | { | 761 | { |
| 734 | struct irq_desc *desc = irq_to_desc(irq); | 762 | struct irq_desc *desc = irq_to_desc(irq); |
| 735 | unsigned long flags; | 763 | unsigned long flags; |
| 736 | 764 | ||
| 737 | if (!desc) { | 765 | if (!desc) |
| 738 | printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); | ||
| 739 | return; | 766 | return; |
| 740 | } | ||
| 741 | |||
| 742 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 743 | desc->status |= IRQ_NOPROBE; | ||
| 744 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 745 | } | ||
| 746 | |||
| 747 | void set_irq_probe(unsigned int irq) | ||
| 748 | { | ||
| 749 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 750 | unsigned long flags; | ||
| 751 | 767 | ||
| 752 | if (!desc) { | 768 | /* Sanitize flags */ |
| 753 | printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); | 769 | set &= IRQF_MODIFY_MASK; |
| 754 | return; | 770 | clr &= IRQF_MODIFY_MASK; |
| 755 | } | ||
| 756 | 771 | ||
| 757 | raw_spin_lock_irqsave(&desc->lock, flags); | 772 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 758 | desc->status &= ~IRQ_NOPROBE; | 773 | desc->status &= ~clr; |
| 774 | desc->status |= set; | ||
| 759 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 775 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 760 | } | 776 | } |
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c new file mode 100644 index 00000000000..20dc5474947 --- /dev/null +++ b/kernel/irq/dummychip.c | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | ||
| 3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | ||
| 4 | * | ||
| 5 | * This file contains the dummy interrupt chip implementation | ||
| 6 | */ | ||
| 7 | #include <linux/interrupt.h> | ||
| 8 | #include <linux/irq.h> | ||
| 9 | |||
| 10 | #include "internals.h" | ||
| 11 | |||
| 12 | /* | ||
| 13 | * What should we do if we get a hw irq event on an illegal vector? | ||
| 14 | * Each architecture has to answer this themself. | ||
| 15 | */ | ||
| 16 | static void ack_bad(struct irq_data *data) | ||
| 17 | { | ||
| 18 | struct irq_desc *desc = irq_data_to_desc(data); | ||
| 19 | |||
| 20 | print_irq_desc(data->irq, desc); | ||
| 21 | ack_bad_irq(data->irq); | ||
| 22 | } | ||
| 23 | |||
| 24 | /* | ||
| 25 | * NOP functions | ||
| 26 | */ | ||
| 27 | static void noop(struct irq_data *data) { } | ||
| 28 | |||
| 29 | static unsigned int noop_ret(struct irq_data *data) | ||
| 30 | { | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 35 | static void compat_noop(unsigned int irq) { } | ||
| 36 | #define END_INIT .end = compat_noop | ||
| 37 | #else | ||
| 38 | #define END_INIT | ||
| 39 | #endif | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Generic no controller implementation | ||
| 43 | */ | ||
| 44 | struct irq_chip no_irq_chip = { | ||
| 45 | .name = "none", | ||
| 46 | .irq_startup = noop_ret, | ||
| 47 | .irq_shutdown = noop, | ||
| 48 | .irq_enable = noop, | ||
| 49 | .irq_disable = noop, | ||
| 50 | .irq_ack = ack_bad, | ||
| 51 | END_INIT | ||
| 52 | }; | ||
| 53 | |||
| 54 | /* | ||
| 55 | * Generic dummy implementation which can be used for | ||
| 56 | * real dumb interrupt sources | ||
| 57 | */ | ||
| 58 | struct irq_chip dummy_irq_chip = { | ||
| 59 | .name = "dummy", | ||
| 60 | .irq_startup = noop_ret, | ||
| 61 | .irq_shutdown = noop, | ||
| 62 | .irq_enable = noop, | ||
| 63 | .irq_disable = noop, | ||
| 64 | .irq_ack = noop, | ||
| 65 | .irq_mask = noop, | ||
| 66 | .irq_unmask = noop, | ||
| 67 | END_INIT | ||
| 68 | }; | ||
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 27e5c691122..e2347eb6330 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -11,24 +11,15 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
| 14 | #include <linux/sched.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/random.h> | 14 | #include <linux/random.h> |
| 15 | #include <linux/sched.h> | ||
| 18 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 19 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
| 20 | #include <linux/rculist.h> | 18 | |
| 21 | #include <linux/hash.h> | ||
| 22 | #include <linux/radix-tree.h> | ||
| 23 | #include <trace/events/irq.h> | 19 | #include <trace/events/irq.h> |
| 24 | 20 | ||
| 25 | #include "internals.h" | 21 | #include "internals.h" |
| 26 | 22 | ||
| 27 | /* | ||
| 28 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | ||
| 29 | */ | ||
| 30 | struct lock_class_key irq_desc_lock_class; | ||
| 31 | |||
| 32 | /** | 23 | /** |
| 33 | * handle_bad_irq - handle spurious and unhandled irqs | 24 | * handle_bad_irq - handle spurious and unhandled irqs |
| 34 | * @irq: the interrupt number | 25 | * @irq: the interrupt number |
| @@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) | |||
| 43 | ack_bad_irq(irq); | 34 | ack_bad_irq(irq); |
| 44 | } | 35 | } |
| 45 | 36 | ||
| 46 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
| 47 | static void __init init_irq_default_affinity(void) | ||
| 48 | { | ||
| 49 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | ||
| 50 | cpumask_setall(irq_default_affinity); | ||
| 51 | } | ||
| 52 | #else | ||
| 53 | static void __init init_irq_default_affinity(void) | ||
| 54 | { | ||
| 55 | } | ||
| 56 | #endif | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Linux has a controller-independent interrupt architecture. | ||
| 60 | * Every controller has a 'controller-template', that is used | ||
| 61 | * by the main code to do the right thing. Each driver-visible | ||
| 62 | * interrupt source is transparently wired to the appropriate | ||
| 63 | * controller. Thus drivers need not be aware of the | ||
| 64 | * interrupt-controller. | ||
| 65 | * | ||
| 66 | * The code is designed to be easily extended with new/different | ||
| 67 | * interrupt controllers, without having to do assembly magic or | ||
| 68 | * having to touch the generic code. | ||
| 69 | * | ||
| 70 | * Controller mappings for all interrupt sources: | ||
| 71 | */ | ||
| 72 | int nr_irqs = NR_IRQS; | ||
| 73 | EXPORT_SYMBOL_GPL(nr_irqs); | ||
| 74 | |||
| 75 | #ifdef CONFIG_SPARSE_IRQ | ||
| 76 | |||
| 77 | static struct irq_desc irq_desc_init = { | ||
| 78 | .irq = -1, | ||
| 79 | .status = IRQ_DISABLED, | ||
| 80 | .chip = &no_irq_chip, | ||
| 81 | .handle_irq = handle_bad_irq, | ||
| 82 | .depth = 1, | ||
| 83 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | ||
| 84 | }; | ||
| 85 | |||
| 86 | void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) | ||
| 87 | { | ||
| 88 | void *ptr; | ||
| 89 | |||
| 90 | ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), | ||
| 91 | GFP_ATOMIC, node); | ||
| 92 | |||
| 93 | /* | ||
| 94 | * don't overwite if can not get new one | ||
| 95 | * init_copy_kstat_irqs() could still use old one | ||
| 96 | */ | ||
| 97 | if (ptr) { | ||
| 98 | printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); | ||
| 99 | desc->kstat_irqs = ptr; | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) | ||
| 104 | { | ||
| 105 | memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); | ||
| 106 | |||
| 107 | raw_spin_lock_init(&desc->lock); | ||
| 108 | desc->irq = irq; | ||
| 109 | #ifdef CONFIG_SMP | ||
| 110 | desc->node = node; | ||
| 111 | #endif | ||
| 112 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
| 113 | init_kstat_irqs(desc, node, nr_cpu_ids); | ||
| 114 | if (!desc->kstat_irqs) { | ||
| 115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | ||
| 116 | BUG_ON(1); | ||
| 117 | } | ||
| 118 | if (!alloc_desc_masks(desc, node, false)) { | ||
| 119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); | ||
| 120 | BUG_ON(1); | ||
| 121 | } | ||
| 122 | init_desc_masks(desc); | ||
| 123 | arch_init_chip_data(desc, node); | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Protect the sparse_irqs: | ||
| 128 | */ | ||
| 129 | DEFINE_RAW_SPINLOCK(sparse_irq_lock); | ||
| 130 | |||
| 131 | static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); | ||
| 132 | |||
| 133 | static void set_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 134 | { | ||
| 135 | radix_tree_insert(&irq_desc_tree, irq, desc); | ||
| 136 | } | ||
| 137 | |||
| 138 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 139 | { | ||
| 140 | return radix_tree_lookup(&irq_desc_tree, irq); | ||
| 141 | } | ||
| 142 | |||
| 143 | void replace_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 144 | { | ||
| 145 | void **ptr; | ||
| 146 | |||
| 147 | ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); | ||
| 148 | if (ptr) | ||
| 149 | radix_tree_replace_slot(ptr, desc); | ||
| 150 | } | ||
| 151 | |||
| 152 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { | ||
| 153 | [0 ... NR_IRQS_LEGACY-1] = { | ||
| 154 | .irq = -1, | ||
| 155 | .status = IRQ_DISABLED, | ||
| 156 | .chip = &no_irq_chip, | ||
| 157 | .handle_irq = handle_bad_irq, | ||
| 158 | .depth = 1, | ||
| 159 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | ||
| 160 | } | ||
| 161 | }; | ||
| 162 | |||
| 163 | static unsigned int *kstat_irqs_legacy; | ||
| 164 | |||
| 165 | int __init early_irq_init(void) | ||
| 166 | { | ||
| 167 | struct irq_desc *desc; | ||
| 168 | int legacy_count; | ||
| 169 | int node; | ||
| 170 | int i; | ||
| 171 | |||
| 172 | init_irq_default_affinity(); | ||
| 173 | |||
| 174 | /* initialize nr_irqs based on nr_cpu_ids */ | ||
| 175 | arch_probe_nr_irqs(); | ||
| 176 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); | ||
| 177 | |||
| 178 | desc = irq_desc_legacy; | ||
| 179 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | ||
| 180 | node = first_online_node; | ||
| 181 | |||
| 182 | /* allocate based on nr_cpu_ids */ | ||
| 183 | kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * | ||
| 184 | sizeof(int), GFP_NOWAIT, node); | ||
| 185 | |||
| 186 | for (i = 0; i < legacy_count; i++) { | ||
| 187 | desc[i].irq = i; | ||
| 188 | #ifdef CONFIG_SMP | ||
| 189 | desc[i].node = node; | ||
| 190 | #endif | ||
| 191 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; | ||
| 192 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | ||
| 193 | alloc_desc_masks(&desc[i], node, true); | ||
| 194 | init_desc_masks(&desc[i]); | ||
| 195 | set_irq_desc(i, &desc[i]); | ||
| 196 | } | ||
| 197 | |||
| 198 | return arch_early_irq_init(); | ||
| 199 | } | ||
| 200 | |||
| 201 | struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | ||
| 202 | { | ||
| 203 | struct irq_desc *desc; | ||
| 204 | unsigned long flags; | ||
| 205 | |||
| 206 | if (irq >= nr_irqs) { | ||
| 207 | WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", | ||
| 208 | irq, nr_irqs); | ||
| 209 | return NULL; | ||
| 210 | } | ||
| 211 | |||
| 212 | desc = irq_to_desc(irq); | ||
| 213 | if (desc) | ||
| 214 | return desc; | ||
| 215 | |||
| 216 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); | ||
| 217 | |||
| 218 | /* We have to check it to avoid races with another CPU */ | ||
| 219 | desc = irq_to_desc(irq); | ||
| 220 | if (desc) | ||
| 221 | goto out_unlock; | ||
| 222 | |||
| 223 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | ||
| 224 | |||
| 225 | printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); | ||
| 226 | if (!desc) { | ||
| 227 | printk(KERN_ERR "can not alloc irq_desc\n"); | ||
| 228 | BUG_ON(1); | ||
| 229 | } | ||
| 230 | init_one_irq_desc(irq, desc, node); | ||
| 231 | |||
| 232 | set_irq_desc(irq, desc); | ||
| 233 | |||
| 234 | out_unlock: | ||
| 235 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
| 236 | |||
| 237 | return desc; | ||
| 238 | } | ||
| 239 | |||
| 240 | #else /* !CONFIG_SPARSE_IRQ */ | ||
| 241 | |||
| 242 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | ||
| 243 | [0 ... NR_IRQS-1] = { | ||
| 244 | .status = IRQ_DISABLED, | ||
| 245 | .chip = &no_irq_chip, | ||
| 246 | .handle_irq = handle_bad_irq, | ||
| 247 | .depth = 1, | ||
| 248 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | ||
| 249 | } | ||
| 250 | }; | ||
| 251 | |||
| 252 | static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; | ||
| 253 | int __init early_irq_init(void) | ||
| 254 | { | ||
| 255 | struct irq_desc *desc; | ||
| 256 | int count; | ||
| 257 | int i; | ||
| 258 | |||
| 259 | init_irq_default_affinity(); | ||
| 260 | |||
| 261 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); | ||
| 262 | |||
| 263 | desc = irq_desc; | ||
| 264 | count = ARRAY_SIZE(irq_desc); | ||
| 265 | |||
| 266 | for (i = 0; i < count; i++) { | ||
| 267 | desc[i].irq = i; | ||
| 268 | alloc_desc_masks(&desc[i], 0, true); | ||
| 269 | init_desc_masks(&desc[i]); | ||
| 270 | desc[i].kstat_irqs = kstat_irqs_all[i]; | ||
| 271 | } | ||
| 272 | return arch_early_irq_init(); | ||
| 273 | } | ||
| 274 | |||
| 275 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 276 | { | ||
| 277 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | ||
| 278 | } | ||
| 279 | |||
| 280 | struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) | ||
| 281 | { | ||
| 282 | return irq_to_desc(irq); | ||
| 283 | } | ||
| 284 | #endif /* !CONFIG_SPARSE_IRQ */ | ||
| 285 | |||
| 286 | void clear_kstat_irqs(struct irq_desc *desc) | ||
| 287 | { | ||
| 288 | memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); | ||
| 289 | } | ||
| 290 | |||
| 291 | /* | ||
| 292 | * What should we do if we get a hw irq event on an illegal vector? | ||
| 293 | * Each architecture has to answer this themself. | ||
| 294 | */ | ||
| 295 | static void ack_bad(unsigned int irq) | ||
| 296 | { | ||
| 297 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 298 | |||
| 299 | print_irq_desc(irq, desc); | ||
| 300 | ack_bad_irq(irq); | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 304 | * NOP functions | ||
| 305 | */ | ||
| 306 | static void noop(unsigned int irq) | ||
| 307 | { | ||
| 308 | } | ||
| 309 | |||
| 310 | static unsigned int noop_ret(unsigned int irq) | ||
| 311 | { | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Generic no controller implementation | ||
| 317 | */ | ||
| 318 | struct irq_chip no_irq_chip = { | ||
| 319 | .name = "none", | ||
| 320 | .startup = noop_ret, | ||
| 321 | .shutdown = noop, | ||
| 322 | .enable = noop, | ||
| 323 | .disable = noop, | ||
| 324 | .ack = ack_bad, | ||
| 325 | .end = noop, | ||
| 326 | }; | ||
| 327 | |||
| 328 | /* | ||
| 329 | * Generic dummy implementation which can be used for | ||
| 330 | * real dumb interrupt sources | ||
| 331 | */ | ||
| 332 | struct irq_chip dummy_irq_chip = { | ||
| 333 | .name = "dummy", | ||
| 334 | .startup = noop_ret, | ||
| 335 | .shutdown = noop, | ||
| 336 | .enable = noop, | ||
| 337 | .disable = noop, | ||
| 338 | .ack = noop, | ||
| 339 | .mask = noop, | ||
| 340 | .unmask = noop, | ||
| 341 | .end = noop, | ||
| 342 | }; | ||
| 343 | |||
| 344 | /* | 37 | /* |
| 345 | * Special, empty irq handler: | 38 | * Special, empty irq handler: |
| 346 | */ | 39 | */ |
| @@ -457,20 +150,20 @@ unsigned int __do_IRQ(unsigned int irq) | |||
| 457 | /* | 150 | /* |
| 458 | * No locking required for CPU-local interrupts: | 151 | * No locking required for CPU-local interrupts: |
| 459 | */ | 152 | */ |
| 460 | if (desc->chip->ack) | 153 | if (desc->irq_data.chip->ack) |
| 461 | desc->chip->ack(irq); | 154 | desc->irq_data.chip->ack(irq); |
| 462 | if (likely(!(desc->status & IRQ_DISABLED))) { | 155 | if (likely(!(desc->status & IRQ_DISABLED))) { |
| 463 | action_ret = handle_IRQ_event(irq, desc->action); | 156 | action_ret = handle_IRQ_event(irq, desc->action); |
| 464 | if (!noirqdebug) | 157 | if (!noirqdebug) |
| 465 | note_interrupt(irq, desc, action_ret); | 158 | note_interrupt(irq, desc, action_ret); |
| 466 | } | 159 | } |
| 467 | desc->chip->end(irq); | 160 | desc->irq_data.chip->end(irq); |
| 468 | return 1; | 161 | return 1; |
| 469 | } | 162 | } |
| 470 | 163 | ||
| 471 | raw_spin_lock(&desc->lock); | 164 | raw_spin_lock(&desc->lock); |
| 472 | if (desc->chip->ack) | 165 | if (desc->irq_data.chip->ack) |
| 473 | desc->chip->ack(irq); | 166 | desc->irq_data.chip->ack(irq); |
| 474 | /* | 167 | /* |
| 475 | * REPLAY is when Linux resends an IRQ that was dropped earlier | 168 | * REPLAY is when Linux resends an IRQ that was dropped earlier |
| 476 | * WAITING is used by probe to mark irqs that are being tested | 169 | * WAITING is used by probe to mark irqs that are being tested |
| @@ -530,27 +223,9 @@ out: | |||
| 530 | * The ->end() handler has to deal with interrupts which got | 223 | * The ->end() handler has to deal with interrupts which got |
| 531 | * disabled while the handler was running. | 224 | * disabled while the handler was running. |
| 532 | */ | 225 | */ |
| 533 | desc->chip->end(irq); | 226 | desc->irq_data.chip->end(irq); |
| 534 | raw_spin_unlock(&desc->lock); | 227 | raw_spin_unlock(&desc->lock); |
| 535 | 228 | ||
| 536 | return 1; | 229 | return 1; |
| 537 | } | 230 | } |
| 538 | #endif | 231 | #endif |
| 539 | |||
| 540 | void early_init_irq_lock_class(void) | ||
| 541 | { | ||
| 542 | struct irq_desc *desc; | ||
| 543 | int i; | ||
| 544 | |||
| 545 | for_each_irq_desc(i, desc) { | ||
| 546 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
| 547 | } | ||
| 548 | } | ||
| 549 | |||
| 550 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | ||
| 551 | { | ||
| 552 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 553 | return desc ? desc->kstat_irqs[cpu] : 0; | ||
| 554 | } | ||
| 555 | EXPORT_SYMBOL(kstat_irqs_cpu); | ||
| 556 | |||
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index c63f3bc88f0..4571ae7e085 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -1,9 +1,12 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * IRQ subsystem internal functions and variables: | 2 | * IRQ subsystem internal functions and variables: |
| 3 | */ | 3 | */ |
| 4 | #include <linux/irqdesc.h> | ||
| 4 | 5 | ||
| 5 | extern int noirqdebug; | 6 | extern int noirqdebug; |
| 6 | 7 | ||
| 8 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | ||
| 9 | |||
| 7 | /* Set default functions for irq_chip structures: */ | 10 | /* Set default functions for irq_chip structures: */ |
| 8 | extern void irq_chip_set_defaults(struct irq_chip *chip); | 11 | extern void irq_chip_set_defaults(struct irq_chip *chip); |
| 9 | 12 | ||
| @@ -15,21 +18,19 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 15 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 18 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); |
| 16 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 19 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); |
| 17 | 20 | ||
| 18 | extern struct lock_class_key irq_desc_lock_class; | ||
| 19 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 21 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
| 20 | extern void clear_kstat_irqs(struct irq_desc *desc); | ||
| 21 | extern raw_spinlock_t sparse_irq_lock; | ||
| 22 | 22 | ||
| 23 | #ifdef CONFIG_SPARSE_IRQ | 23 | /* Resending of interrupts :*/ |
| 24 | void replace_irq_desc(unsigned int irq, struct irq_desc *desc); | 24 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
| 25 | #endif | ||
| 26 | 25 | ||
| 27 | #ifdef CONFIG_PROC_FS | 26 | #ifdef CONFIG_PROC_FS |
| 28 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 27 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
| 28 | extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); | ||
| 29 | extern void register_handler_proc(unsigned int irq, struct irqaction *action); | 29 | extern void register_handler_proc(unsigned int irq, struct irqaction *action); |
| 30 | extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); | 30 | extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); |
| 31 | #else | 31 | #else |
| 32 | static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } | 32 | static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } |
| 33 | static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } | ||
| 33 | static inline void register_handler_proc(unsigned int irq, | 34 | static inline void register_handler_proc(unsigned int irq, |
| 34 | struct irqaction *action) { } | 35 | struct irqaction *action) { } |
| 35 | static inline void unregister_handler_proc(unsigned int irq, | 36 | static inline void unregister_handler_proc(unsigned int irq, |
| @@ -40,17 +41,27 @@ extern int irq_select_affinity_usr(unsigned int irq); | |||
| 40 | 41 | ||
| 41 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 42 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
| 42 | 43 | ||
| 44 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 45 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) | ||
| 46 | { | ||
| 47 | if (desc->irq_data.chip && desc->irq_data.chip->end) | ||
| 48 | desc->irq_data.chip->end(irq); | ||
| 49 | } | ||
| 50 | #else | ||
| 51 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } | ||
| 52 | #endif | ||
| 53 | |||
| 43 | /* Inline functions for support of irq chips on slow busses */ | 54 | /* Inline functions for support of irq chips on slow busses */ |
| 44 | static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) | 55 | static inline void chip_bus_lock(struct irq_desc *desc) |
| 45 | { | 56 | { |
| 46 | if (unlikely(desc->chip->bus_lock)) | 57 | if (unlikely(desc->irq_data.chip->irq_bus_lock)) |
| 47 | desc->chip->bus_lock(irq); | 58 | desc->irq_data.chip->irq_bus_lock(&desc->irq_data); |
| 48 | } | 59 | } |
| 49 | 60 | ||
| 50 | static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) | 61 | static inline void chip_bus_sync_unlock(struct irq_desc *desc) |
| 51 | { | 62 | { |
| 52 | if (unlikely(desc->chip->bus_sync_unlock)) | 63 | if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) |
| 53 | desc->chip->bus_sync_unlock(irq); | 64 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); |
| 54 | } | 65 | } |
| 55 | 66 | ||
| 56 | /* | 67 | /* |
| @@ -67,8 +78,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
| 67 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | 78 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); |
| 68 | printk("->handle_irq(): %p, ", desc->handle_irq); | 79 | printk("->handle_irq(): %p, ", desc->handle_irq); |
| 69 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | 80 | print_symbol("%s\n", (unsigned long)desc->handle_irq); |
| 70 | printk("->chip(): %p, ", desc->chip); | 81 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); |
| 71 | print_symbol("%s\n", (unsigned long)desc->chip); | 82 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); |
| 72 | printk("->action(): %p\n", desc->action); | 83 | printk("->action(): %p\n", desc->action); |
| 73 | if (desc->action) { | 84 | if (desc->action) { |
| 74 | printk("->action->handler(): %p, ", desc->action->handler); | 85 | printk("->action->handler(): %p, ", desc->action->handler); |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c new file mode 100644 index 00000000000..9d917ff7267 --- /dev/null +++ b/kernel/irq/irqdesc.c | |||
| @@ -0,0 +1,395 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | ||
| 3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | ||
| 4 | * | ||
| 5 | * This file contains the interrupt descriptor management code | ||
| 6 | * | ||
| 7 | * Detailed information is available in Documentation/DocBook/genericirq | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | #include <linux/irq.h> | ||
| 11 | #include <linux/slab.h> | ||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/kernel_stat.h> | ||
| 15 | #include <linux/radix-tree.h> | ||
| 16 | #include <linux/bitmap.h> | ||
| 17 | |||
| 18 | #include "internals.h" | ||
| 19 | |||
| 20 | /* | ||
| 21 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | ||
| 22 | */ | ||
| 23 | static struct lock_class_key irq_desc_lock_class; | ||
| 24 | |||
| 25 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
| 26 | static void __init init_irq_default_affinity(void) | ||
| 27 | { | ||
| 28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | ||
| 29 | cpumask_setall(irq_default_affinity); | ||
| 30 | } | ||
| 31 | #else | ||
| 32 | static void __init init_irq_default_affinity(void) | ||
| 33 | { | ||
| 34 | } | ||
| 35 | #endif | ||
| 36 | |||
| 37 | #ifdef CONFIG_SMP | ||
| 38 | static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) | ||
| 39 | { | ||
| 40 | if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) | ||
| 41 | return -ENOMEM; | ||
| 42 | |||
| 43 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 44 | if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | ||
| 45 | free_cpumask_var(desc->irq_data.affinity); | ||
| 46 | return -ENOMEM; | ||
| 47 | } | ||
| 48 | #endif | ||
| 49 | return 0; | ||
| 50 | } | ||
| 51 | |||
| 52 | static void desc_smp_init(struct irq_desc *desc, int node) | ||
| 53 | { | ||
| 54 | desc->irq_data.node = node; | ||
| 55 | cpumask_copy(desc->irq_data.affinity, irq_default_affinity); | ||
| 56 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 57 | cpumask_clear(desc->pending_mask); | ||
| 58 | #endif | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline int desc_node(struct irq_desc *desc) | ||
| 62 | { | ||
| 63 | return desc->irq_data.node; | ||
| 64 | } | ||
| 65 | |||
| 66 | #else | ||
| 67 | static inline int | ||
| 68 | alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } | ||
| 69 | static inline void desc_smp_init(struct irq_desc *desc, int node) { } | ||
| 70 | static inline int desc_node(struct irq_desc *desc) { return 0; } | ||
| 71 | #endif | ||
| 72 | |||
| 73 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | ||
| 74 | { | ||
| 75 | desc->irq_data.irq = irq; | ||
| 76 | desc->irq_data.chip = &no_irq_chip; | ||
| 77 | desc->irq_data.chip_data = NULL; | ||
| 78 | desc->irq_data.handler_data = NULL; | ||
| 79 | desc->irq_data.msi_desc = NULL; | ||
| 80 | desc->status = IRQ_DEFAULT_INIT_FLAGS; | ||
| 81 | desc->handle_irq = handle_bad_irq; | ||
| 82 | desc->depth = 1; | ||
| 83 | desc->irq_count = 0; | ||
| 84 | desc->irqs_unhandled = 0; | ||
| 85 | desc->name = NULL; | ||
| 86 | memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); | ||
| 87 | desc_smp_init(desc, node); | ||
| 88 | } | ||
| 89 | |||
| 90 | int nr_irqs = NR_IRQS; | ||
| 91 | EXPORT_SYMBOL_GPL(nr_irqs); | ||
| 92 | |||
| 93 | static DEFINE_MUTEX(sparse_irq_lock); | ||
| 94 | static DECLARE_BITMAP(allocated_irqs, NR_IRQS); | ||
| 95 | |||
| 96 | #ifdef CONFIG_SPARSE_IRQ | ||
| 97 | |||
| 98 | static RADIX_TREE(irq_desc_tree, GFP_KERNEL); | ||
| 99 | |||
| 100 | static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) | ||
| 101 | { | ||
| 102 | radix_tree_insert(&irq_desc_tree, irq, desc); | ||
| 103 | } | ||
| 104 | |||
| 105 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 106 | { | ||
| 107 | return radix_tree_lookup(&irq_desc_tree, irq); | ||
| 108 | } | ||
| 109 | |||
| 110 | static void delete_irq_desc(unsigned int irq) | ||
| 111 | { | ||
| 112 | radix_tree_delete(&irq_desc_tree, irq); | ||
| 113 | } | ||
| 114 | |||
| 115 | #ifdef CONFIG_SMP | ||
| 116 | static void free_masks(struct irq_desc *desc) | ||
| 117 | { | ||
| 118 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 119 | free_cpumask_var(desc->pending_mask); | ||
| 120 | #endif | ||
| 121 | free_cpumask_var(desc->irq_data.affinity); | ||
| 122 | } | ||
| 123 | #else | ||
| 124 | static inline void free_masks(struct irq_desc *desc) { } | ||
| 125 | #endif | ||
| 126 | |||
| 127 | static struct irq_desc *alloc_desc(int irq, int node) | ||
| 128 | { | ||
| 129 | struct irq_desc *desc; | ||
| 130 | gfp_t gfp = GFP_KERNEL; | ||
| 131 | |||
| 132 | desc = kzalloc_node(sizeof(*desc), gfp, node); | ||
| 133 | if (!desc) | ||
| 134 | return NULL; | ||
| 135 | /* allocate based on nr_cpu_ids */ | ||
| 136 | desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), | ||
| 137 | gfp, node); | ||
| 138 | if (!desc->kstat_irqs) | ||
| 139 | goto err_desc; | ||
| 140 | |||
| 141 | if (alloc_masks(desc, gfp, node)) | ||
| 142 | goto err_kstat; | ||
| 143 | |||
| 144 | raw_spin_lock_init(&desc->lock); | ||
| 145 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
| 146 | |||
| 147 | desc_set_defaults(irq, desc, node); | ||
| 148 | |||
| 149 | return desc; | ||
| 150 | |||
| 151 | err_kstat: | ||
| 152 | kfree(desc->kstat_irqs); | ||
| 153 | err_desc: | ||
| 154 | kfree(desc); | ||
| 155 | return NULL; | ||
| 156 | } | ||
| 157 | |||
| 158 | static void free_desc(unsigned int irq) | ||
| 159 | { | ||
| 160 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 161 | |||
| 162 | unregister_irq_proc(irq, desc); | ||
| 163 | |||
| 164 | mutex_lock(&sparse_irq_lock); | ||
| 165 | delete_irq_desc(irq); | ||
| 166 | mutex_unlock(&sparse_irq_lock); | ||
| 167 | |||
| 168 | free_masks(desc); | ||
| 169 | kfree(desc->kstat_irqs); | ||
| 170 | kfree(desc); | ||
| 171 | } | ||
| 172 | |||
| 173 | static int alloc_descs(unsigned int start, unsigned int cnt, int node) | ||
| 174 | { | ||
| 175 | struct irq_desc *desc; | ||
| 176 | int i; | ||
| 177 | |||
| 178 | for (i = 0; i < cnt; i++) { | ||
| 179 | desc = alloc_desc(start + i, node); | ||
| 180 | if (!desc) | ||
| 181 | goto err; | ||
| 182 | mutex_lock(&sparse_irq_lock); | ||
| 183 | irq_insert_desc(start + i, desc); | ||
| 184 | mutex_unlock(&sparse_irq_lock); | ||
| 185 | } | ||
| 186 | return start; | ||
| 187 | |||
| 188 | err: | ||
| 189 | for (i--; i >= 0; i--) | ||
| 190 | free_desc(start + i); | ||
| 191 | |||
| 192 | mutex_lock(&sparse_irq_lock); | ||
| 193 | bitmap_clear(allocated_irqs, start, cnt); | ||
| 194 | mutex_unlock(&sparse_irq_lock); | ||
| 195 | return -ENOMEM; | ||
| 196 | } | ||
| 197 | |||
| 198 | struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | ||
| 199 | { | ||
| 200 | int res = irq_alloc_descs(irq, irq, 1, node); | ||
| 201 | |||
| 202 | if (res == -EEXIST || res == irq) | ||
| 203 | return irq_to_desc(irq); | ||
| 204 | return NULL; | ||
| 205 | } | ||
| 206 | |||
| 207 | int __init early_irq_init(void) | ||
| 208 | { | ||
| 209 | int i, initcnt, node = first_online_node; | ||
| 210 | struct irq_desc *desc; | ||
| 211 | |||
| 212 | init_irq_default_affinity(); | ||
| 213 | |||
| 214 | /* Let arch update nr_irqs and return the nr of preallocated irqs */ | ||
| 215 | initcnt = arch_probe_nr_irqs(); | ||
| 216 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); | ||
| 217 | |||
| 218 | for (i = 0; i < initcnt; i++) { | ||
| 219 | desc = alloc_desc(i, node); | ||
| 220 | set_bit(i, allocated_irqs); | ||
| 221 | irq_insert_desc(i, desc); | ||
| 222 | } | ||
| 223 | return arch_early_irq_init(); | ||
| 224 | } | ||
| 225 | |||
| 226 | #else /* !CONFIG_SPARSE_IRQ */ | ||
| 227 | |||
| 228 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | ||
| 229 | [0 ... NR_IRQS-1] = { | ||
| 230 | .status = IRQ_DEFAULT_INIT_FLAGS, | ||
| 231 | .handle_irq = handle_bad_irq, | ||
| 232 | .depth = 1, | ||
| 233 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | ||
| 234 | } | ||
| 235 | }; | ||
| 236 | |||
| 237 | static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; | ||
| 238 | int __init early_irq_init(void) | ||
| 239 | { | ||
| 240 | int count, i, node = first_online_node; | ||
| 241 | struct irq_desc *desc; | ||
| 242 | |||
| 243 | init_irq_default_affinity(); | ||
| 244 | |||
| 245 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); | ||
| 246 | |||
| 247 | desc = irq_desc; | ||
| 248 | count = ARRAY_SIZE(irq_desc); | ||
| 249 | |||
| 250 | for (i = 0; i < count; i++) { | ||
| 251 | desc[i].irq_data.irq = i; | ||
| 252 | desc[i].irq_data.chip = &no_irq_chip; | ||
| 253 | desc[i].kstat_irqs = kstat_irqs_all[i]; | ||
| 254 | alloc_masks(desc + i, GFP_KERNEL, node); | ||
| 255 | desc_smp_init(desc + i, node); | ||
| 256 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | ||
| 257 | } | ||
| 258 | return arch_early_irq_init(); | ||
| 259 | } | ||
| 260 | |||
| 261 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 262 | { | ||
| 263 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | ||
| 264 | } | ||
| 265 | |||
| 266 | struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) | ||
| 267 | { | ||
| 268 | return irq_to_desc(irq); | ||
| 269 | } | ||
| 270 | |||
| 271 | static void free_desc(unsigned int irq) | ||
| 272 | { | ||
| 273 | dynamic_irq_cleanup(irq); | ||
| 274 | } | ||
| 275 | |||
| 276 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | ||
| 277 | { | ||
| 278 | return start; | ||
| 279 | } | ||
| 280 | #endif /* !CONFIG_SPARSE_IRQ */ | ||
| 281 | |||
| 282 | /* Dynamic interrupt handling */ | ||
| 283 | |||
| 284 | /** | ||
| 285 | * irq_free_descs - free irq descriptors | ||
| 286 | * @from: Start of descriptor range | ||
| 287 | * @cnt: Number of consecutive irqs to free | ||
| 288 | */ | ||
| 289 | void irq_free_descs(unsigned int from, unsigned int cnt) | ||
| 290 | { | ||
| 291 | int i; | ||
| 292 | |||
| 293 | if (from >= nr_irqs || (from + cnt) > nr_irqs) | ||
| 294 | return; | ||
| 295 | |||
| 296 | for (i = 0; i < cnt; i++) | ||
| 297 | free_desc(from + i); | ||
| 298 | |||
| 299 | mutex_lock(&sparse_irq_lock); | ||
| 300 | bitmap_clear(allocated_irqs, from, cnt); | ||
| 301 | mutex_unlock(&sparse_irq_lock); | ||
| 302 | } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | ||
| 306 | * @irq: Allocate for specific irq number if irq >= 0 | ||
| 307 | * @from: Start the search from this irq number | ||
| 308 | * @cnt: Number of consecutive irqs to allocate. | ||
| 309 | * @node: Preferred node on which the irq descriptor should be allocated | ||
| 310 | * | ||
| 311 | * Returns the first irq number or error code | ||
| 312 | */ | ||
| 313 | int __ref | ||
| 314 | irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | ||
| 315 | { | ||
| 316 | int start, ret; | ||
| 317 | |||
| 318 | if (!cnt) | ||
| 319 | return -EINVAL; | ||
| 320 | |||
| 321 | mutex_lock(&sparse_irq_lock); | ||
| 322 | |||
| 323 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | ||
| 324 | ret = -EEXIST; | ||
| 325 | if (irq >=0 && start != irq) | ||
| 326 | goto err; | ||
| 327 | |||
| 328 | ret = -ENOMEM; | ||
| 329 | if (start >= nr_irqs) | ||
| 330 | goto err; | ||
| 331 | |||
| 332 | bitmap_set(allocated_irqs, start, cnt); | ||
| 333 | mutex_unlock(&sparse_irq_lock); | ||
| 334 | return alloc_descs(start, cnt, node); | ||
| 335 | |||
| 336 | err: | ||
| 337 | mutex_unlock(&sparse_irq_lock); | ||
| 338 | return ret; | ||
| 339 | } | ||
| 340 | |||
| 341 | /** | ||
| 342 | * irq_reserve_irqs - mark irqs allocated | ||
| 343 | * @from: mark from irq number | ||
| 344 | * @cnt: number of irqs to mark | ||
| 345 | * | ||
| 346 | * Returns 0 on success or an appropriate error code | ||
| 347 | */ | ||
| 348 | int irq_reserve_irqs(unsigned int from, unsigned int cnt) | ||
| 349 | { | ||
| 350 | unsigned int start; | ||
| 351 | int ret = 0; | ||
| 352 | |||
| 353 | if (!cnt || (from + cnt) > nr_irqs) | ||
| 354 | return -EINVAL; | ||
| 355 | |||
| 356 | mutex_lock(&sparse_irq_lock); | ||
| 357 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | ||
| 358 | if (start == from) | ||
| 359 | bitmap_set(allocated_irqs, start, cnt); | ||
| 360 | else | ||
| 361 | ret = -EEXIST; | ||
| 362 | mutex_unlock(&sparse_irq_lock); | ||
| 363 | return ret; | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 367 | * irq_get_next_irq - get next allocated irq number | ||
| 368 | * @offset: where to start the search | ||
| 369 | * | ||
| 370 | * Returns next irq number after offset or nr_irqs if none is found. | ||
| 371 | */ | ||
| 372 | unsigned int irq_get_next_irq(unsigned int offset) | ||
| 373 | { | ||
| 374 | return find_next_bit(allocated_irqs, nr_irqs, offset); | ||
| 375 | } | ||
| 376 | |||
| 377 | /** | ||
| 378 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | ||
| 379 | * @irq: irq number to initialize | ||
| 380 | */ | ||
| 381 | void dynamic_irq_cleanup(unsigned int irq) | ||
| 382 | { | ||
| 383 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 384 | unsigned long flags; | ||
| 385 | |||
| 386 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 387 | desc_set_defaults(irq, desc, desc_node(desc)); | ||
| 388 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 389 | } | ||
| 390 | |||
| 391 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | ||
| 392 | { | ||
| 393 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 394 | return desc ? desc->kstat_irqs[cpu] : 0; | ||
| 395 | } | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9d91a..644e8d5fa36 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
| 73 | { | 73 | { |
| 74 | struct irq_desc *desc = irq_to_desc(irq); | 74 | struct irq_desc *desc = irq_to_desc(irq); |
| 75 | 75 | ||
| 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || | 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || |
| 77 | !desc->chip->set_affinity) | 77 | !desc->irq_data.chip->irq_set_affinity) |
| 78 | return 0; | 78 | return 0; |
| 79 | 79 | ||
| 80 | return 1; | 80 | return 1; |
| @@ -109,17 +109,18 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
| 109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
| 110 | { | 110 | { |
| 111 | struct irq_desc *desc = irq_to_desc(irq); | 111 | struct irq_desc *desc = irq_to_desc(irq); |
| 112 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 112 | unsigned long flags; | 113 | unsigned long flags; |
| 113 | 114 | ||
| 114 | if (!desc->chip->set_affinity) | 115 | if (!chip->irq_set_affinity) |
| 115 | return -EINVAL; | 116 | return -EINVAL; |
| 116 | 117 | ||
| 117 | raw_spin_lock_irqsave(&desc->lock, flags); | 118 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 118 | 119 | ||
| 119 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 120 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 120 | if (desc->status & IRQ_MOVE_PCNTXT) { | 121 | if (desc->status & IRQ_MOVE_PCNTXT) { |
| 121 | if (!desc->chip->set_affinity(irq, cpumask)) { | 122 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { |
| 122 | cpumask_copy(desc->affinity, cpumask); | 123 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 123 | irq_set_thread_affinity(desc); | 124 | irq_set_thread_affinity(desc); |
| 124 | } | 125 | } |
| 125 | } | 126 | } |
| @@ -128,8 +129,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 128 | cpumask_copy(desc->pending_mask, cpumask); | 129 | cpumask_copy(desc->pending_mask, cpumask); |
| 129 | } | 130 | } |
| 130 | #else | 131 | #else |
| 131 | if (!desc->chip->set_affinity(irq, cpumask)) { | 132 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { |
| 132 | cpumask_copy(desc->affinity, cpumask); | 133 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 133 | irq_set_thread_affinity(desc); | 134 | irq_set_thread_affinity(desc); |
| 134 | } | 135 | } |
| 135 | #endif | 136 | #endif |
| @@ -168,16 +169,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 168 | * one of the targets is online. | 169 | * one of the targets is online. |
| 169 | */ | 170 | */ |
| 170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 171 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
| 171 | if (cpumask_any_and(desc->affinity, cpu_online_mask) | 172 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) |
| 172 | < nr_cpu_ids) | 173 | < nr_cpu_ids) |
| 173 | goto set_affinity; | 174 | goto set_affinity; |
| 174 | else | 175 | else |
| 175 | desc->status &= ~IRQ_AFFINITY_SET; | 176 | desc->status &= ~IRQ_AFFINITY_SET; |
| 176 | } | 177 | } |
| 177 | 178 | ||
| 178 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); | 179 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); |
| 179 | set_affinity: | 180 | set_affinity: |
| 180 | desc->chip->set_affinity(irq, desc->affinity); | 181 | desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); |
| 181 | 182 | ||
| 182 | return 0; | 183 | return 0; |
| 183 | } | 184 | } |
| @@ -223,7 +224,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
| 223 | 224 | ||
| 224 | if (!desc->depth++) { | 225 | if (!desc->depth++) { |
| 225 | desc->status |= IRQ_DISABLED; | 226 | desc->status |= IRQ_DISABLED; |
| 226 | desc->chip->disable(irq); | 227 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
| 227 | } | 228 | } |
| 228 | } | 229 | } |
| 229 | 230 | ||
| @@ -246,11 +247,11 @@ void disable_irq_nosync(unsigned int irq) | |||
| 246 | if (!desc) | 247 | if (!desc) |
| 247 | return; | 248 | return; |
| 248 | 249 | ||
| 249 | chip_bus_lock(irq, desc); | 250 | chip_bus_lock(desc); |
| 250 | raw_spin_lock_irqsave(&desc->lock, flags); | 251 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 251 | __disable_irq(desc, irq, false); | 252 | __disable_irq(desc, irq, false); |
| 252 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 253 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 253 | chip_bus_sync_unlock(irq, desc); | 254 | chip_bus_sync_unlock(desc); |
| 254 | } | 255 | } |
| 255 | EXPORT_SYMBOL(disable_irq_nosync); | 256 | EXPORT_SYMBOL(disable_irq_nosync); |
| 256 | 257 | ||
| @@ -313,7 +314,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 313 | * IRQ line is re-enabled. | 314 | * IRQ line is re-enabled. |
| 314 | * | 315 | * |
| 315 | * This function may be called from IRQ context only when | 316 | * This function may be called from IRQ context only when |
| 316 | * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 317 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
| 317 | */ | 318 | */ |
| 318 | void enable_irq(unsigned int irq) | 319 | void enable_irq(unsigned int irq) |
| 319 | { | 320 | { |
| @@ -323,11 +324,11 @@ void enable_irq(unsigned int irq) | |||
| 323 | if (!desc) | 324 | if (!desc) |
| 324 | return; | 325 | return; |
| 325 | 326 | ||
| 326 | chip_bus_lock(irq, desc); | 327 | chip_bus_lock(desc); |
| 327 | raw_spin_lock_irqsave(&desc->lock, flags); | 328 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 328 | __enable_irq(desc, irq, false); | 329 | __enable_irq(desc, irq, false); |
| 329 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 330 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 330 | chip_bus_sync_unlock(irq, desc); | 331 | chip_bus_sync_unlock(desc); |
| 331 | } | 332 | } |
| 332 | EXPORT_SYMBOL(enable_irq); | 333 | EXPORT_SYMBOL(enable_irq); |
| 333 | 334 | ||
| @@ -336,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
| 336 | struct irq_desc *desc = irq_to_desc(irq); | 337 | struct irq_desc *desc = irq_to_desc(irq); |
| 337 | int ret = -ENXIO; | 338 | int ret = -ENXIO; |
| 338 | 339 | ||
| 339 | if (desc->chip->set_wake) | 340 | if (desc->irq_data.chip->irq_set_wake) |
| 340 | ret = desc->chip->set_wake(irq, on); | 341 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); |
| 341 | 342 | ||
| 342 | return ret; | 343 | return ret; |
| 343 | } | 344 | } |
| @@ -429,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc) | |||
| 429 | } | 430 | } |
| 430 | 431 | ||
| 431 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 432 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
| 432 | unsigned long flags) | 433 | unsigned long flags) |
| 433 | { | 434 | { |
| 434 | int ret; | 435 | int ret; |
| 435 | struct irq_chip *chip = desc->chip; | 436 | struct irq_chip *chip = desc->irq_data.chip; |
| 436 | 437 | ||
| 437 | if (!chip || !chip->set_type) { | 438 | if (!chip || !chip->irq_set_type) { |
| 438 | /* | 439 | /* |
| 439 | * IRQF_TRIGGER_* but the PIC does not support multiple | 440 | * IRQF_TRIGGER_* but the PIC does not support multiple |
| 440 | * flow-types? | 441 | * flow-types? |
| @@ -445,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 445 | } | 446 | } |
| 446 | 447 | ||
| 447 | /* caller masked out all except trigger mode flags */ | 448 | /* caller masked out all except trigger mode flags */ |
| 448 | ret = chip->set_type(irq, flags); | 449 | ret = chip->irq_set_type(&desc->irq_data, flags); |
| 449 | 450 | ||
| 450 | if (ret) | 451 | if (ret) |
| 451 | pr_err("setting trigger mode %d for irq %u failed (%pF)\n", | 452 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", |
| 452 | (int)flags, irq, chip->set_type); | 453 | flags, irq, chip->irq_set_type); |
| 453 | else { | 454 | else { |
| 454 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 455 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
| 455 | flags |= IRQ_LEVEL; | 456 | flags |= IRQ_LEVEL; |
| @@ -457,8 +458,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | 458 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); |
| 458 | desc->status |= flags; | 459 | desc->status |= flags; |
| 459 | 460 | ||
| 460 | if (chip != desc->chip) | 461 | if (chip != desc->irq_data.chip) |
| 461 | irq_chip_set_defaults(desc->chip); | 462 | irq_chip_set_defaults(desc->irq_data.chip); |
| 462 | } | 463 | } |
| 463 | 464 | ||
| 464 | return ret; | 465 | return ret; |
| @@ -507,7 +508,7 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
| 507 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 508 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) |
| 508 | { | 509 | { |
| 509 | again: | 510 | again: |
| 510 | chip_bus_lock(irq, desc); | 511 | chip_bus_lock(desc); |
| 511 | raw_spin_lock_irq(&desc->lock); | 512 | raw_spin_lock_irq(&desc->lock); |
| 512 | 513 | ||
| 513 | /* | 514 | /* |
| @@ -521,17 +522,17 @@ again: | |||
| 521 | */ | 522 | */ |
| 522 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | 523 | if (unlikely(desc->status & IRQ_INPROGRESS)) { |
| 523 | raw_spin_unlock_irq(&desc->lock); | 524 | raw_spin_unlock_irq(&desc->lock); |
| 524 | chip_bus_sync_unlock(irq, desc); | 525 | chip_bus_sync_unlock(desc); |
| 525 | cpu_relax(); | 526 | cpu_relax(); |
| 526 | goto again; | 527 | goto again; |
| 527 | } | 528 | } |
| 528 | 529 | ||
| 529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 530 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { |
| 530 | desc->status &= ~IRQ_MASKED; | 531 | desc->status &= ~IRQ_MASKED; |
| 531 | desc->chip->unmask(irq); | 532 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 532 | } | 533 | } |
| 533 | raw_spin_unlock_irq(&desc->lock); | 534 | raw_spin_unlock_irq(&desc->lock); |
| 534 | chip_bus_sync_unlock(irq, desc); | 535 | chip_bus_sync_unlock(desc); |
| 535 | } | 536 | } |
| 536 | 537 | ||
| 537 | #ifdef CONFIG_SMP | 538 | #ifdef CONFIG_SMP |
| @@ -556,7 +557,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |||
| 556 | } | 557 | } |
| 557 | 558 | ||
| 558 | raw_spin_lock_irq(&desc->lock); | 559 | raw_spin_lock_irq(&desc->lock); |
| 559 | cpumask_copy(mask, desc->affinity); | 560 | cpumask_copy(mask, desc->irq_data.affinity); |
| 560 | raw_spin_unlock_irq(&desc->lock); | 561 | raw_spin_unlock_irq(&desc->lock); |
| 561 | 562 | ||
| 562 | set_cpus_allowed_ptr(current, mask); | 563 | set_cpus_allowed_ptr(current, mask); |
| @@ -657,7 +658,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 657 | if (!desc) | 658 | if (!desc) |
| 658 | return -EINVAL; | 659 | return -EINVAL; |
| 659 | 660 | ||
| 660 | if (desc->chip == &no_irq_chip) | 661 | if (desc->irq_data.chip == &no_irq_chip) |
| 661 | return -ENOSYS; | 662 | return -ENOSYS; |
| 662 | /* | 663 | /* |
| 663 | * Some drivers like serial.c use request_irq() heavily, | 664 | * Some drivers like serial.c use request_irq() heavily, |
| @@ -752,7 +753,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 752 | } | 753 | } |
| 753 | 754 | ||
| 754 | if (!shared) { | 755 | if (!shared) { |
| 755 | irq_chip_set_defaults(desc->chip); | 756 | irq_chip_set_defaults(desc->irq_data.chip); |
| 756 | 757 | ||
| 757 | init_waitqueue_head(&desc->wait_for_threads); | 758 | init_waitqueue_head(&desc->wait_for_threads); |
| 758 | 759 | ||
| @@ -779,7 +780,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 779 | if (!(desc->status & IRQ_NOAUTOEN)) { | 780 | if (!(desc->status & IRQ_NOAUTOEN)) { |
| 780 | desc->depth = 0; | 781 | desc->depth = 0; |
| 781 | desc->status &= ~IRQ_DISABLED; | 782 | desc->status &= ~IRQ_DISABLED; |
| 782 | desc->chip->startup(irq); | 783 | desc->irq_data.chip->irq_startup(&desc->irq_data); |
| 783 | } else | 784 | } else |
| 784 | /* Undo nested disables: */ | 785 | /* Undo nested disables: */ |
| 785 | desc->depth = 1; | 786 | desc->depth = 1; |
| @@ -912,17 +913,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 912 | 913 | ||
| 913 | /* Currently used only by UML, might disappear one day: */ | 914 | /* Currently used only by UML, might disappear one day: */ |
| 914 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 915 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
| 915 | if (desc->chip->release) | 916 | if (desc->irq_data.chip->release) |
| 916 | desc->chip->release(irq, dev_id); | 917 | desc->irq_data.chip->release(irq, dev_id); |
| 917 | #endif | 918 | #endif |
| 918 | 919 | ||
| 919 | /* If this was the last handler, shut down the IRQ line: */ | 920 | /* If this was the last handler, shut down the IRQ line: */ |
| 920 | if (!desc->action) { | 921 | if (!desc->action) { |
| 921 | desc->status |= IRQ_DISABLED; | 922 | desc->status |= IRQ_DISABLED; |
| 922 | if (desc->chip->shutdown) | 923 | if (desc->irq_data.chip->irq_shutdown) |
| 923 | desc->chip->shutdown(irq); | 924 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); |
| 924 | else | 925 | else |
| 925 | desc->chip->disable(irq); | 926 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
| 926 | } | 927 | } |
| 927 | 928 | ||
| 928 | #ifdef CONFIG_SMP | 929 | #ifdef CONFIG_SMP |
| @@ -997,9 +998,9 @@ void free_irq(unsigned int irq, void *dev_id) | |||
| 997 | if (!desc) | 998 | if (!desc) |
| 998 | return; | 999 | return; |
| 999 | 1000 | ||
| 1000 | chip_bus_lock(irq, desc); | 1001 | chip_bus_lock(desc); |
| 1001 | kfree(__free_irq(irq, dev_id)); | 1002 | kfree(__free_irq(irq, dev_id)); |
| 1002 | chip_bus_sync_unlock(irq, desc); | 1003 | chip_bus_sync_unlock(desc); |
| 1003 | } | 1004 | } |
| 1004 | EXPORT_SYMBOL(free_irq); | 1005 | EXPORT_SYMBOL(free_irq); |
| 1005 | 1006 | ||
| @@ -1086,9 +1087,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
| 1086 | action->name = devname; | 1087 | action->name = devname; |
| 1087 | action->dev_id = dev_id; | 1088 | action->dev_id = dev_id; |
| 1088 | 1089 | ||
| 1089 | chip_bus_lock(irq, desc); | 1090 | chip_bus_lock(desc); |
| 1090 | retval = __setup_irq(irq, desc, action); | 1091 | retval = __setup_irq(irq, desc, action); |
| 1091 | chip_bus_sync_unlock(irq, desc); | 1092 | chip_bus_sync_unlock(desc); |
| 1092 | 1093 | ||
| 1093 | if (retval) | 1094 | if (retval) |
| 1094 | kfree(action); | 1095 | kfree(action); |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 24196228083..1d254194048 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | void move_masked_irq(int irq) | 7 | void move_masked_irq(int irq) |
| 8 | { | 8 | { |
| 9 | struct irq_desc *desc = irq_to_desc(irq); | 9 | struct irq_desc *desc = irq_to_desc(irq); |
| 10 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 10 | 11 | ||
| 11 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 12 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) |
| 12 | return; | 13 | return; |
| @@ -24,7 +25,7 @@ void move_masked_irq(int irq) | |||
| 24 | if (unlikely(cpumask_empty(desc->pending_mask))) | 25 | if (unlikely(cpumask_empty(desc->pending_mask))) |
| 25 | return; | 26 | return; |
| 26 | 27 | ||
| 27 | if (!desc->chip->set_affinity) | 28 | if (!chip->irq_set_affinity) |
| 28 | return; | 29 | return; |
| 29 | 30 | ||
| 30 | assert_raw_spin_locked(&desc->lock); | 31 | assert_raw_spin_locked(&desc->lock); |
| @@ -43,8 +44,9 @@ void move_masked_irq(int irq) | |||
| 43 | */ | 44 | */ |
| 44 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) | 45 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) |
| 45 | < nr_cpu_ids)) | 46 | < nr_cpu_ids)) |
| 46 | if (!desc->chip->set_affinity(irq, desc->pending_mask)) { | 47 | if (!chip->irq_set_affinity(&desc->irq_data, |
| 47 | cpumask_copy(desc->affinity, desc->pending_mask); | 48 | desc->pending_mask, false)) { |
| 49 | cpumask_copy(desc->irq_data.affinity, desc->pending_mask); | ||
| 48 | irq_set_thread_affinity(desc); | 50 | irq_set_thread_affinity(desc); |
| 49 | } | 51 | } |
| 50 | 52 | ||
| @@ -61,8 +63,8 @@ void move_native_irq(int irq) | |||
| 61 | if (unlikely(desc->status & IRQ_DISABLED)) | 63 | if (unlikely(desc->status & IRQ_DISABLED)) |
| 62 | return; | 64 | return; |
| 63 | 65 | ||
| 64 | desc->chip->mask(irq); | 66 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
| 65 | move_masked_irq(irq); | 67 | move_masked_irq(irq); |
| 66 | desc->chip->unmask(irq); | 68 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 67 | } | 69 | } |
| 68 | 70 | ||
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c deleted file mode 100644 index 65d3845665a..00000000000 --- a/kernel/irq/numa_migrate.c +++ /dev/null | |||
| @@ -1,120 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * NUMA irq-desc migration code | ||
| 3 | * | ||
| 4 | * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to | ||
| 5 | * the new "home node" of the IRQ. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/irq.h> | ||
| 9 | #include <linux/slab.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/random.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/kernel_stat.h> | ||
| 14 | |||
| 15 | #include "internals.h" | ||
| 16 | |||
| 17 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, | ||
| 18 | struct irq_desc *desc, | ||
| 19 | int node, int nr) | ||
| 20 | { | ||
| 21 | init_kstat_irqs(desc, node, nr); | ||
| 22 | |||
| 23 | if (desc->kstat_irqs != old_desc->kstat_irqs) | ||
| 24 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, | ||
| 25 | nr * sizeof(*desc->kstat_irqs)); | ||
| 26 | } | ||
| 27 | |||
| 28 | static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | ||
| 29 | { | ||
| 30 | if (old_desc->kstat_irqs == desc->kstat_irqs) | ||
| 31 | return; | ||
| 32 | |||
| 33 | kfree(old_desc->kstat_irqs); | ||
| 34 | old_desc->kstat_irqs = NULL; | ||
| 35 | } | ||
| 36 | |||
| 37 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | ||
| 38 | struct irq_desc *desc, int node) | ||
| 39 | { | ||
| 40 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | ||
| 41 | if (!alloc_desc_masks(desc, node, false)) { | ||
| 42 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | ||
| 43 | "for migration.\n", irq); | ||
| 44 | return false; | ||
| 45 | } | ||
| 46 | raw_spin_lock_init(&desc->lock); | ||
| 47 | desc->node = node; | ||
| 48 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
| 49 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); | ||
| 50 | init_copy_desc_masks(old_desc, desc); | ||
| 51 | arch_init_copy_chip_data(old_desc, desc, node); | ||
| 52 | return true; | ||
| 53 | } | ||
| 54 | |||
| 55 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | ||
| 56 | { | ||
| 57 | free_kstat_irqs(old_desc, desc); | ||
| 58 | free_desc_masks(old_desc, desc); | ||
| 59 | arch_free_chip_data(old_desc, desc); | ||
| 60 | } | ||
| 61 | |||
| 62 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | ||
| 63 | int node) | ||
| 64 | { | ||
| 65 | struct irq_desc *desc; | ||
| 66 | unsigned int irq; | ||
| 67 | unsigned long flags; | ||
| 68 | |||
| 69 | irq = old_desc->irq; | ||
| 70 | |||
| 71 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); | ||
| 72 | |||
| 73 | /* We have to check it to avoid races with another CPU */ | ||
| 74 | desc = irq_to_desc(irq); | ||
| 75 | |||
| 76 | if (desc && old_desc != desc) | ||
| 77 | goto out_unlock; | ||
| 78 | |||
| 79 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | ||
| 80 | if (!desc) { | ||
| 81 | printk(KERN_ERR "irq %d: can not get new irq_desc " | ||
| 82 | "for migration.\n", irq); | ||
| 83 | /* still use old one */ | ||
| 84 | desc = old_desc; | ||
| 85 | goto out_unlock; | ||
| 86 | } | ||
| 87 | if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { | ||
| 88 | /* still use old one */ | ||
| 89 | kfree(desc); | ||
| 90 | desc = old_desc; | ||
| 91 | goto out_unlock; | ||
| 92 | } | ||
| 93 | |||
| 94 | replace_irq_desc(irq, desc); | ||
| 95 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
| 96 | |||
| 97 | /* free the old one */ | ||
| 98 | free_one_irq_desc(old_desc, desc); | ||
| 99 | kfree(old_desc); | ||
| 100 | |||
| 101 | return desc; | ||
| 102 | |||
| 103 | out_unlock: | ||
| 104 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
| 105 | |||
| 106 | return desc; | ||
| 107 | } | ||
| 108 | |||
| 109 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
| 110 | { | ||
| 111 | /* those static or target node is -1, do not move them */ | ||
| 112 | if (desc->irq < NR_IRQS_LEGACY || node == -1) | ||
| 113 | return desc; | ||
| 114 | |||
| 115 | if (desc->node != node) | ||
| 116 | desc = __real_move_irq_desc(desc, node); | ||
| 117 | |||
| 118 | return desc; | ||
| 119 | } | ||
| 120 | |||
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee540bd..01b1d3a8898 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir; | |||
| 21 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 21 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
| 22 | { | 22 | { |
| 23 | struct irq_desc *desc = irq_to_desc((long)m->private); | 23 | struct irq_desc *desc = irq_to_desc((long)m->private); |
| 24 | const struct cpumask *mask = desc->affinity; | 24 | const struct cpumask *mask = desc->irq_data.affinity; |
| 25 | 25 | ||
| 26 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 26 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 27 | if (desc->status & IRQ_MOVE_PENDING) | 27 | if (desc->status & IRQ_MOVE_PENDING) |
| @@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
| 65 | cpumask_var_t new_value; | 65 | cpumask_var_t new_value; |
| 66 | int err; | 66 | int err; |
| 67 | 67 | ||
| 68 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || | 68 | if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || |
| 69 | irq_balancing_disabled(irq)) | 69 | irq_balancing_disabled(irq)) |
| 70 | return -EIO; | 70 | return -EIO; |
| 71 | 71 | ||
| @@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v) | |||
| 185 | { | 185 | { |
| 186 | struct irq_desc *desc = irq_to_desc((long) m->private); | 186 | struct irq_desc *desc = irq_to_desc((long) m->private); |
| 187 | 187 | ||
| 188 | seq_printf(m, "%d\n", desc->node); | 188 | seq_printf(m, "%d\n", desc->irq_data.node); |
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| @@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
| 269 | { | 269 | { |
| 270 | char name [MAX_NAMELEN]; | 270 | char name [MAX_NAMELEN]; |
| 271 | 271 | ||
| 272 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) | 272 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) |
| 273 | return; | 273 | return; |
| 274 | 274 | ||
| 275 | memset(name, 0, MAX_NAMELEN); | 275 | memset(name, 0, MAX_NAMELEN); |
| @@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
| 297 | &irq_spurious_proc_fops, (void *)(long)irq); | 297 | &irq_spurious_proc_fops, (void *)(long)irq); |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) | ||
| 301 | { | ||
| 302 | char name [MAX_NAMELEN]; | ||
| 303 | |||
| 304 | if (!root_irq_dir || !desc->dir) | ||
| 305 | return; | ||
| 306 | #ifdef CONFIG_SMP | ||
| 307 | remove_proc_entry("smp_affinity", desc->dir); | ||
| 308 | remove_proc_entry("affinity_hint", desc->dir); | ||
| 309 | remove_proc_entry("node", desc->dir); | ||
| 310 | #endif | ||
| 311 | remove_proc_entry("spurious", desc->dir); | ||
| 312 | |||
| 313 | memset(name, 0, MAX_NAMELEN); | ||
| 314 | sprintf(name, "%u", irq); | ||
| 315 | remove_proc_entry(name, root_irq_dir); | ||
| 316 | } | ||
| 317 | |||
| 300 | #undef MAX_NAMELEN | 318 | #undef MAX_NAMELEN |
| 301 | 319 | ||
| 302 | void unregister_handler_proc(unsigned int irq, struct irqaction *action) | 320 | void unregister_handler_proc(unsigned int irq, struct irqaction *action) |
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 090c3763f3a..891115a929a 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
| @@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
| 60 | /* | 60 | /* |
| 61 | * Make sure the interrupt is enabled, before resending it: | 61 | * Make sure the interrupt is enabled, before resending it: |
| 62 | */ | 62 | */ |
| 63 | desc->chip->enable(irq); | 63 | desc->irq_data.chip->irq_enable(&desc->irq_data); |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * We do not resend level type interrupts. Level type | 66 | * We do not resend level type interrupts. Level type |
| @@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
| 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { |
| 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; |
| 72 | 72 | ||
| 73 | if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { | 73 | if (!desc->irq_data.chip->irq_retrigger || |
| 74 | !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { | ||
| 74 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 75 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
| 75 | /* Set it pending and activate the softirq: */ | 76 | /* Set it pending and activate the softirq: */ |
| 76 | set_bit(irq, irqs_resend); | 77 | set_bit(irq, irqs_resend); |
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 89fb90ae534..3089d3b9d5f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
| 15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
| 16 | 16 | ||
| 17 | #include "internals.h" | ||
| 18 | |||
| 17 | static int irqfixup __read_mostly; | 19 | static int irqfixup __read_mostly; |
| 18 | 20 | ||
| 19 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) | 21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) |
| @@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
| 78 | * If we did actual work for the real IRQ line we must let the | 80 | * If we did actual work for the real IRQ line we must let the |
| 79 | * IRQ controller clean up too | 81 | * IRQ controller clean up too |
| 80 | */ | 82 | */ |
| 81 | if (work && desc->chip && desc->chip->end) | 83 | if (work) |
| 82 | desc->chip->end(irq); | 84 | irq_end(irq, desc); |
| 83 | raw_spin_unlock(&desc->lock); | 85 | raw_spin_unlock(&desc->lock); |
| 84 | 86 | ||
| 85 | return ok; | 87 | return ok; |
| @@ -254,7 +256,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
| 254 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); | 256 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); |
| 255 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; | 257 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; |
| 256 | desc->depth++; | 258 | desc->depth++; |
| 257 | desc->chip->disable(irq); | 259 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
| 258 | 260 | ||
| 259 | mod_timer(&poll_spurious_irq_timer, | 261 | mod_timer(&poll_spurious_irq_timer, |
| 260 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 262 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 79ee8f1fc0e..fc978889b19 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -910,17 +910,14 @@ int __init __weak early_irq_init(void) | |||
| 910 | return 0; | 910 | return 0; |
| 911 | } | 911 | } |
| 912 | 912 | ||
| 913 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
| 913 | int __init __weak arch_probe_nr_irqs(void) | 914 | int __init __weak arch_probe_nr_irqs(void) |
| 914 | { | 915 | { |
| 915 | return 0; | 916 | return NR_IRQS_LEGACY; |
| 916 | } | 917 | } |
| 917 | 918 | ||
| 918 | int __init __weak arch_early_irq_init(void) | 919 | int __init __weak arch_early_irq_init(void) |
| 919 | { | 920 | { |
| 920 | return 0; | 921 | return 0; |
| 921 | } | 922 | } |
| 922 | 923 | #endif | |
| 923 | int __weak arch_init_chip_data(struct irq_desc *desc, int node) | ||
| 924 | { | ||
| 925 | return 0; | ||
| 926 | } | ||
