diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2010-10-01 06:58:38 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2010-10-04 06:36:26 -0400 |
| commit | 6b8ff3120c758340505dddf08ad685ebb841d5d5 (patch) | |
| tree | 794eed27c6f9a8931b8fdf4a7ae60a1560b237fc | |
| parent | ff7dcd44dd446db2c3e13bdedf2d52b8e0127f16 (diff) | |
genirq: Convert core code to irq_data
Convert all references in the core code to irq, chip, handler_data,
chip_data, msi_desc, affinity to irq_data.*
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | include/linux/irq.h | 10 | ||||
| -rw-r--r-- | kernel/irq/autoprobe.c | 14 | ||||
| -rw-r--r-- | kernel/irq/chip.c | 78 | ||||
| -rw-r--r-- | kernel/irq/handle.c | 16 | ||||
| -rw-r--r-- | kernel/irq/internals.h | 12 | ||||
| -rw-r--r-- | kernel/irq/manage.c | 54 | ||||
| -rw-r--r-- | kernel/irq/migration.c | 10 | ||||
| -rw-r--r-- | kernel/irq/numa_migrate.c | 8 | ||||
| -rw-r--r-- | kernel/irq/proc.c | 8 | ||||
| -rw-r--r-- | kernel/irq/resend.c | 5 | ||||
| -rw-r--r-- | kernel/irq/spurious.c | 6 |
11 files changed, 111 insertions, 110 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h index 363c76ff82c8..002351d83c3f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -475,12 +475,12 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | |||
| 475 | gfp = GFP_NOWAIT; | 475 | gfp = GFP_NOWAIT; |
| 476 | 476 | ||
| 477 | #ifdef CONFIG_CPUMASK_OFFSTACK | 477 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 478 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | 478 | if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) |
| 479 | return false; | 479 | return false; |
| 480 | 480 | ||
| 481 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 481 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 482 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 482 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
| 483 | free_cpumask_var(desc->affinity); | 483 | free_cpumask_var(desc->irq_data.affinity); |
| 484 | return false; | 484 | return false; |
| 485 | } | 485 | } |
| 486 | #endif | 486 | #endif |
| @@ -490,7 +490,7 @@ static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | |||
| 490 | 490 | ||
| 491 | static inline void init_desc_masks(struct irq_desc *desc) | 491 | static inline void init_desc_masks(struct irq_desc *desc) |
| 492 | { | 492 | { |
| 493 | cpumask_setall(desc->affinity); | 493 | cpumask_setall(desc->irq_data.affinity); |
| 494 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 494 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 495 | cpumask_clear(desc->pending_mask); | 495 | cpumask_clear(desc->pending_mask); |
| 496 | #endif | 496 | #endif |
| @@ -510,7 +510,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, | |||
| 510 | struct irq_desc *new_desc) | 510 | struct irq_desc *new_desc) |
| 511 | { | 511 | { |
| 512 | #ifdef CONFIG_CPUMASK_OFFSTACK | 512 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 513 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 513 | cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity); |
| 514 | 514 | ||
| 515 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 515 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 516 | cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); | 516 | cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); |
| @@ -521,7 +521,7 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, | |||
| 521 | static inline void free_desc_masks(struct irq_desc *old_desc, | 521 | static inline void free_desc_masks(struct irq_desc *old_desc, |
| 522 | struct irq_desc *new_desc) | 522 | struct irq_desc *new_desc) |
| 523 | { | 523 | { |
| 524 | free_cpumask_var(old_desc->affinity); | 524 | free_cpumask_var(old_desc->irq_data.affinity); |
| 525 | 525 | ||
| 526 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 526 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 527 | free_cpumask_var(old_desc->pending_mask); | 527 | free_cpumask_var(old_desc->pending_mask); |
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 2295a31ef110..f9bf9b228033 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c | |||
| @@ -57,9 +57,9 @@ unsigned long probe_irq_on(void) | |||
| 57 | * Some chips need to know about probing in | 57 | * Some chips need to know about probing in |
| 58 | * progress: | 58 | * progress: |
| 59 | */ | 59 | */ |
| 60 | if (desc->chip->set_type) | 60 | if (desc->irq_data.chip->set_type) |
| 61 | desc->chip->set_type(i, IRQ_TYPE_PROBE); | 61 | desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE); |
| 62 | desc->chip->startup(i); | 62 | desc->irq_data.chip->startup(i); |
| 63 | } | 63 | } |
| 64 | raw_spin_unlock_irq(&desc->lock); | 64 | raw_spin_unlock_irq(&desc->lock); |
| 65 | } | 65 | } |
| @@ -76,7 +76,7 @@ unsigned long probe_irq_on(void) | |||
| 76 | raw_spin_lock_irq(&desc->lock); | 76 | raw_spin_lock_irq(&desc->lock); |
| 77 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 77 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { |
| 78 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; | 78 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; |
| 79 | if (desc->chip->startup(i)) | 79 | if (desc->irq_data.chip->startup(i)) |
| 80 | desc->status |= IRQ_PENDING; | 80 | desc->status |= IRQ_PENDING; |
| 81 | } | 81 | } |
| 82 | raw_spin_unlock_irq(&desc->lock); | 82 | raw_spin_unlock_irq(&desc->lock); |
| @@ -98,7 +98,7 @@ unsigned long probe_irq_on(void) | |||
| 98 | /* It triggered already - consider it spurious. */ | 98 | /* It triggered already - consider it spurious. */ |
| 99 | if (!(status & IRQ_WAITING)) { | 99 | if (!(status & IRQ_WAITING)) { |
| 100 | desc->status = status & ~IRQ_AUTODETECT; | 100 | desc->status = status & ~IRQ_AUTODETECT; |
| 101 | desc->chip->shutdown(i); | 101 | desc->irq_data.chip->shutdown(i); |
| 102 | } else | 102 | } else |
| 103 | if (i < 32) | 103 | if (i < 32) |
| 104 | mask |= 1 << i; | 104 | mask |= 1 << i; |
| @@ -137,7 +137,7 @@ unsigned int probe_irq_mask(unsigned long val) | |||
| 137 | mask |= 1 << i; | 137 | mask |= 1 << i; |
| 138 | 138 | ||
| 139 | desc->status = status & ~IRQ_AUTODETECT; | 139 | desc->status = status & ~IRQ_AUTODETECT; |
| 140 | desc->chip->shutdown(i); | 140 | desc->irq_data.chip->shutdown(i); |
| 141 | } | 141 | } |
| 142 | raw_spin_unlock_irq(&desc->lock); | 142 | raw_spin_unlock_irq(&desc->lock); |
| 143 | } | 143 | } |
| @@ -181,7 +181,7 @@ int probe_irq_off(unsigned long val) | |||
| 181 | nr_of_irqs++; | 181 | nr_of_irqs++; |
| 182 | } | 182 | } |
| 183 | desc->status = status & ~IRQ_AUTODETECT; | 183 | desc->status = status & ~IRQ_AUTODETECT; |
| 184 | desc->chip->shutdown(i); | 184 | desc->irq_data.chip->shutdown(i); |
| 185 | } | 185 | } |
| 186 | raw_spin_unlock_irq(&desc->lock); | 186 | raw_spin_unlock_irq(&desc->lock); |
| 187 | } | 187 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 4ea775cc60f0..e0e93ff10afd 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -32,18 +32,18 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) | |||
| 32 | /* Ensure we don't have left over values from a previous use of this irq */ | 32 | /* Ensure we don't have left over values from a previous use of this irq */ |
| 33 | raw_spin_lock_irqsave(&desc->lock, flags); | 33 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 34 | desc->status = IRQ_DISABLED; | 34 | desc->status = IRQ_DISABLED; |
| 35 | desc->chip = &no_irq_chip; | 35 | desc->irq_data.chip = &no_irq_chip; |
| 36 | desc->handle_irq = handle_bad_irq; | 36 | desc->handle_irq = handle_bad_irq; |
| 37 | desc->depth = 1; | 37 | desc->depth = 1; |
| 38 | desc->msi_desc = NULL; | 38 | desc->irq_data.msi_desc = NULL; |
| 39 | desc->handler_data = NULL; | 39 | desc->irq_data.handler_data = NULL; |
| 40 | if (!keep_chip_data) | 40 | if (!keep_chip_data) |
| 41 | desc->chip_data = NULL; | 41 | desc->irq_data.chip_data = NULL; |
| 42 | desc->action = NULL; | 42 | desc->action = NULL; |
| 43 | desc->irq_count = 0; | 43 | desc->irq_count = 0; |
| 44 | desc->irqs_unhandled = 0; | 44 | desc->irqs_unhandled = 0; |
| 45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
| 46 | cpumask_setall(desc->affinity); | 46 | cpumask_setall(desc->irq_data.affinity); |
| 47 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 47 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 48 | cpumask_clear(desc->pending_mask); | 48 | cpumask_clear(desc->pending_mask); |
| 49 | #endif | 49 | #endif |
| @@ -64,7 +64,7 @@ void dynamic_irq_init(unsigned int irq) | |||
| 64 | * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq | 64 | * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq |
| 65 | * @irq: irq number to initialize | 65 | * @irq: irq number to initialize |
| 66 | * | 66 | * |
| 67 | * does not set irq_to_desc(irq)->chip_data to NULL | 67 | * does not set irq_to_desc(irq)->irq_data.chip_data to NULL |
| 68 | */ | 68 | */ |
| 69 | void dynamic_irq_init_keep_chip_data(unsigned int irq) | 69 | void dynamic_irq_init_keep_chip_data(unsigned int irq) |
| 70 | { | 70 | { |
| @@ -88,12 +88,12 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) | |||
| 88 | irq); | 88 | irq); |
| 89 | return; | 89 | return; |
| 90 | } | 90 | } |
| 91 | desc->msi_desc = NULL; | 91 | desc->irq_data.msi_desc = NULL; |
| 92 | desc->handler_data = NULL; | 92 | desc->irq_data.handler_data = NULL; |
| 93 | if (!keep_chip_data) | 93 | if (!keep_chip_data) |
| 94 | desc->chip_data = NULL; | 94 | desc->irq_data.chip_data = NULL; |
| 95 | desc->handle_irq = handle_bad_irq; | 95 | desc->handle_irq = handle_bad_irq; |
| 96 | desc->chip = &no_irq_chip; | 96 | desc->irq_data.chip = &no_irq_chip; |
| 97 | desc->name = NULL; | 97 | desc->name = NULL; |
| 98 | clear_kstat_irqs(desc); | 98 | clear_kstat_irqs(desc); |
| 99 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 99 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| @@ -112,7 +112,7 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
| 112 | * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq | 112 | * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq |
| 113 | * @irq: irq number to initialize | 113 | * @irq: irq number to initialize |
| 114 | * | 114 | * |
| 115 | * does not set irq_to_desc(irq)->chip_data to NULL | 115 | * does not set irq_to_desc(irq)->irq_data.chip_data to NULL |
| 116 | */ | 116 | */ |
| 117 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) | 117 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) |
| 118 | { | 118 | { |
| @@ -140,7 +140,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) | |||
| 140 | 140 | ||
| 141 | raw_spin_lock_irqsave(&desc->lock, flags); | 141 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 142 | irq_chip_set_defaults(chip); | 142 | irq_chip_set_defaults(chip); |
| 143 | desc->chip = chip; | 143 | desc->irq_data.chip = chip; |
| 144 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 144 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 145 | 145 | ||
| 146 | return 0; | 146 | return 0; |
| @@ -193,7 +193,7 @@ int set_irq_data(unsigned int irq, void *data) | |||
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | raw_spin_lock_irqsave(&desc->lock, flags); | 195 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 196 | desc->handler_data = data; | 196 | desc->irq_data.handler_data = data; |
| 197 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 197 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 198 | return 0; | 198 | return 0; |
| 199 | } | 199 | } |
| @@ -218,7 +218,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) | |||
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | raw_spin_lock_irqsave(&desc->lock, flags); | 220 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 221 | desc->msi_desc = entry; | 221 | desc->irq_data.msi_desc = entry; |
| 222 | if (entry) | 222 | if (entry) |
| 223 | entry->irq = irq; | 223 | entry->irq = irq; |
| 224 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 224 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| @@ -243,13 +243,13 @@ int set_irq_chip_data(unsigned int irq, void *data) | |||
| 243 | return -EINVAL; | 243 | return -EINVAL; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | if (!desc->chip) { | 246 | if (!desc->irq_data.chip) { |
| 247 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); | 247 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); |
| 248 | return -EINVAL; | 248 | return -EINVAL; |
| 249 | } | 249 | } |
| 250 | 250 | ||
| 251 | raw_spin_lock_irqsave(&desc->lock, flags); | 251 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 252 | desc->chip_data = data; | 252 | desc->irq_data.chip_data = data; |
| 253 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 253 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 254 | 254 | ||
| 255 | return 0; | 255 | return 0; |
| @@ -291,7 +291,7 @@ static void default_enable(unsigned int irq) | |||
| 291 | { | 291 | { |
| 292 | struct irq_desc *desc = irq_to_desc(irq); | 292 | struct irq_desc *desc = irq_to_desc(irq); |
| 293 | 293 | ||
| 294 | desc->chip->unmask(irq); | 294 | desc->irq_data.chip->unmask(irq); |
| 295 | desc->status &= ~IRQ_MASKED; | 295 | desc->status &= ~IRQ_MASKED; |
| 296 | } | 296 | } |
| 297 | 297 | ||
| @@ -309,7 +309,7 @@ static unsigned int default_startup(unsigned int irq) | |||
| 309 | { | 309 | { |
| 310 | struct irq_desc *desc = irq_to_desc(irq); | 310 | struct irq_desc *desc = irq_to_desc(irq); |
| 311 | 311 | ||
| 312 | desc->chip->enable(irq); | 312 | desc->irq_data.chip->enable(irq); |
| 313 | return 0; | 313 | return 0; |
| 314 | } | 314 | } |
| 315 | 315 | ||
| @@ -320,7 +320,7 @@ static void default_shutdown(unsigned int irq) | |||
| 320 | { | 320 | { |
| 321 | struct irq_desc *desc = irq_to_desc(irq); | 321 | struct irq_desc *desc = irq_to_desc(irq); |
| 322 | 322 | ||
| 323 | desc->chip->mask(irq); | 323 | desc->irq_data.chip->mask(irq); |
| 324 | desc->status |= IRQ_MASKED; | 324 | desc->status |= IRQ_MASKED; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| @@ -350,28 +350,28 @@ void irq_chip_set_defaults(struct irq_chip *chip) | |||
| 350 | 350 | ||
| 351 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) | 351 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) |
| 352 | { | 352 | { |
| 353 | if (desc->chip->mask_ack) | 353 | if (desc->irq_data.chip->mask_ack) |
| 354 | desc->chip->mask_ack(irq); | 354 | desc->irq_data.chip->mask_ack(irq); |
| 355 | else { | 355 | else { |
| 356 | desc->chip->mask(irq); | 356 | desc->irq_data.chip->mask(irq); |
| 357 | if (desc->chip->ack) | 357 | if (desc->irq_data.chip->ack) |
| 358 | desc->chip->ack(irq); | 358 | desc->irq_data.chip->ack(irq); |
| 359 | } | 359 | } |
| 360 | desc->status |= IRQ_MASKED; | 360 | desc->status |= IRQ_MASKED; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | static inline void mask_irq(struct irq_desc *desc, int irq) | 363 | static inline void mask_irq(struct irq_desc *desc, int irq) |
| 364 | { | 364 | { |
| 365 | if (desc->chip->mask) { | 365 | if (desc->irq_data.chip->mask) { |
| 366 | desc->chip->mask(irq); | 366 | desc->irq_data.chip->mask(irq); |
| 367 | desc->status |= IRQ_MASKED; | 367 | desc->status |= IRQ_MASKED; |
| 368 | } | 368 | } |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | static inline void unmask_irq(struct irq_desc *desc, int irq) | 371 | static inline void unmask_irq(struct irq_desc *desc, int irq) |
| 372 | { | 372 | { |
| 373 | if (desc->chip->unmask) { | 373 | if (desc->irq_data.chip->unmask) { |
| 374 | desc->chip->unmask(irq); | 374 | desc->irq_data.chip->unmask(irq); |
| 375 | desc->status &= ~IRQ_MASKED; | 375 | desc->status &= ~IRQ_MASKED; |
| 376 | } | 376 | } |
| 377 | } | 377 | } |
| @@ -552,7 +552,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 552 | raw_spin_lock(&desc->lock); | 552 | raw_spin_lock(&desc->lock); |
| 553 | desc->status &= ~IRQ_INPROGRESS; | 553 | desc->status &= ~IRQ_INPROGRESS; |
| 554 | out: | 554 | out: |
| 555 | desc->chip->eoi(irq); | 555 | desc->irq_data.chip->eoi(irq); |
| 556 | 556 | ||
| 557 | raw_spin_unlock(&desc->lock); | 557 | raw_spin_unlock(&desc->lock); |
| 558 | } | 558 | } |
| @@ -594,8 +594,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 594 | kstat_incr_irqs_this_cpu(irq, desc); | 594 | kstat_incr_irqs_this_cpu(irq, desc); |
| 595 | 595 | ||
| 596 | /* Start handling the irq */ | 596 | /* Start handling the irq */ |
| 597 | if (desc->chip->ack) | 597 | if (desc->irq_data.chip->ack) |
| 598 | desc->chip->ack(irq); | 598 | desc->irq_data.chip->ack(irq); |
| 599 | 599 | ||
| 600 | /* Mark the IRQ currently in progress.*/ | 600 | /* Mark the IRQ currently in progress.*/ |
| 601 | desc->status |= IRQ_INPROGRESS; | 601 | desc->status |= IRQ_INPROGRESS; |
| @@ -648,15 +648,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |||
| 648 | 648 | ||
| 649 | kstat_incr_irqs_this_cpu(irq, desc); | 649 | kstat_incr_irqs_this_cpu(irq, desc); |
| 650 | 650 | ||
| 651 | if (desc->chip->ack) | 651 | if (desc->irq_data.chip->ack) |
| 652 | desc->chip->ack(irq); | 652 | desc->irq_data.chip->ack(irq); |
| 653 | 653 | ||
| 654 | action_ret = handle_IRQ_event(irq, desc->action); | 654 | action_ret = handle_IRQ_event(irq, desc->action); |
| 655 | if (!noirqdebug) | 655 | if (!noirqdebug) |
| 656 | note_interrupt(irq, desc, action_ret); | 656 | note_interrupt(irq, desc, action_ret); |
| 657 | 657 | ||
| 658 | if (desc->chip->eoi) | 658 | if (desc->irq_data.chip->eoi) |
| 659 | desc->chip->eoi(irq); | 659 | desc->irq_data.chip->eoi(irq); |
| 660 | } | 660 | } |
| 661 | 661 | ||
| 662 | void | 662 | void |
| @@ -674,7 +674,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 674 | 674 | ||
| 675 | if (!handle) | 675 | if (!handle) |
| 676 | handle = handle_bad_irq; | 676 | handle = handle_bad_irq; |
| 677 | else if (desc->chip == &no_irq_chip) { | 677 | else if (desc->irq_data.chip == &no_irq_chip) { |
| 678 | printk(KERN_WARNING "Trying to install %sinterrupt handler " | 678 | printk(KERN_WARNING "Trying to install %sinterrupt handler " |
| 679 | "for IRQ%d\n", is_chained ? "chained " : "", irq); | 679 | "for IRQ%d\n", is_chained ? "chained " : "", irq); |
| 680 | /* | 680 | /* |
| @@ -684,7 +684,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 684 | * prevent us to setup the interrupt at all. Switch it to | 684 | * prevent us to setup the interrupt at all. Switch it to |
| 685 | * dummy_irq_chip for easy transition. | 685 | * dummy_irq_chip for easy transition. |
| 686 | */ | 686 | */ |
| 687 | desc->chip = &dummy_irq_chip; | 687 | desc->irq_data.chip = &dummy_irq_chip; |
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | chip_bus_lock(irq, desc); | 690 | chip_bus_lock(irq, desc); |
| @@ -692,7 +692,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 692 | 692 | ||
| 693 | /* Uninstall? */ | 693 | /* Uninstall? */ |
| 694 | if (handle == handle_bad_irq) { | 694 | if (handle == handle_bad_irq) { |
| 695 | if (desc->chip != &no_irq_chip) | 695 | if (desc->irq_data.chip != &no_irq_chip) |
| 696 | mask_ack_irq(desc, irq); | 696 | mask_ack_irq(desc, irq); |
| 697 | desc->status |= IRQ_DISABLED; | 697 | desc->status |= IRQ_DISABLED; |
| 698 | desc->depth = 1; | 698 | desc->depth = 1; |
| @@ -704,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 704 | desc->status &= ~IRQ_DISABLED; | 704 | desc->status &= ~IRQ_DISABLED; |
| 705 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; | 705 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; |
| 706 | desc->depth = 0; | 706 | desc->depth = 0; |
| 707 | desc->chip->startup(irq); | 707 | desc->irq_data.chip->startup(irq); |
| 708 | } | 708 | } |
| 709 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 709 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 710 | chip_bus_sync_unlock(irq, desc); | 710 | chip_bus_sync_unlock(irq, desc); |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 099d4fc368c3..fc27d76e83ef 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -105,7 +105,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) | |||
| 105 | raw_spin_lock_init(&desc->lock); | 105 | raw_spin_lock_init(&desc->lock); |
| 106 | desc->irq_data.irq = irq; | 106 | desc->irq_data.irq = irq; |
| 107 | #ifdef CONFIG_SMP | 107 | #ifdef CONFIG_SMP |
| 108 | desc->node = node; | 108 | desc->irq_data.node = node; |
| 109 | #endif | 109 | #endif |
| 110 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 110 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
| 111 | init_kstat_irqs(desc, node, nr_cpu_ids); | 111 | init_kstat_irqs(desc, node, nr_cpu_ids); |
| @@ -185,7 +185,7 @@ int __init early_irq_init(void) | |||
| 185 | desc[i].irq_data.irq = i; | 185 | desc[i].irq_data.irq = i; |
| 186 | desc[i].irq_data.chip = &no_irq_chip; | 186 | desc[i].irq_data.chip = &no_irq_chip; |
| 187 | #ifdef CONFIG_SMP | 187 | #ifdef CONFIG_SMP |
| 188 | desc[i].node = node; | 188 | desc[i].irq_data.node = node; |
| 189 | #endif | 189 | #endif |
| 190 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; | 190 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
| 191 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 191 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| @@ -456,20 +456,20 @@ unsigned int __do_IRQ(unsigned int irq) | |||
| 456 | /* | 456 | /* |
| 457 | * No locking required for CPU-local interrupts: | 457 | * No locking required for CPU-local interrupts: |
| 458 | */ | 458 | */ |
| 459 | if (desc->chip->ack) | 459 | if (desc->irq_data.chip->ack) |
| 460 | desc->chip->ack(irq); | 460 | desc->irq_data.chip->ack(irq); |
| 461 | if (likely(!(desc->status & IRQ_DISABLED))) { | 461 | if (likely(!(desc->status & IRQ_DISABLED))) { |
| 462 | action_ret = handle_IRQ_event(irq, desc->action); | 462 | action_ret = handle_IRQ_event(irq, desc->action); |
| 463 | if (!noirqdebug) | 463 | if (!noirqdebug) |
| 464 | note_interrupt(irq, desc, action_ret); | 464 | note_interrupt(irq, desc, action_ret); |
| 465 | } | 465 | } |
| 466 | desc->chip->end(irq); | 466 | desc->irq_data.chip->end(irq); |
| 467 | return 1; | 467 | return 1; |
| 468 | } | 468 | } |
| 469 | 469 | ||
| 470 | raw_spin_lock(&desc->lock); | 470 | raw_spin_lock(&desc->lock); |
| 471 | if (desc->chip->ack) | 471 | if (desc->irq_data.chip->ack) |
| 472 | desc->chip->ack(irq); | 472 | desc->irq_data.chip->ack(irq); |
| 473 | /* | 473 | /* |
| 474 | * REPLAY is when Linux resends an IRQ that was dropped earlier | 474 | * REPLAY is when Linux resends an IRQ that was dropped earlier |
| 475 | * WAITING is used by probe to mark irqs that are being tested | 475 | * WAITING is used by probe to mark irqs that are being tested |
| @@ -529,7 +529,7 @@ out: | |||
| 529 | * The ->end() handler has to deal with interrupts which got | 529 | * The ->end() handler has to deal with interrupts which got |
| 530 | * disabled while the handler was running. | 530 | * disabled while the handler was running. |
| 531 | */ | 531 | */ |
| 532 | desc->chip->end(irq); | 532 | desc->irq_data.chip->end(irq); |
| 533 | raw_spin_unlock(&desc->lock); | 533 | raw_spin_unlock(&desc->lock); |
| 534 | 534 | ||
| 535 | return 1; | 535 | return 1; |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index c63f3bc88f0b..a805a00cfd28 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -43,14 +43,14 @@ extern void irq_set_thread_affinity(struct irq_desc *desc); | |||
| 43 | /* Inline functions for support of irq chips on slow busses */ | 43 | /* Inline functions for support of irq chips on slow busses */ |
| 44 | static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) | 44 | static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) |
| 45 | { | 45 | { |
| 46 | if (unlikely(desc->chip->bus_lock)) | 46 | if (unlikely(desc->irq_data.chip->bus_lock)) |
| 47 | desc->chip->bus_lock(irq); | 47 | desc->irq_data.chip->bus_lock(irq); |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) | 50 | static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) |
| 51 | { | 51 | { |
| 52 | if (unlikely(desc->chip->bus_sync_unlock)) | 52 | if (unlikely(desc->irq_data.chip->bus_sync_unlock)) |
| 53 | desc->chip->bus_sync_unlock(irq); | 53 | desc->irq_data.chip->bus_sync_unlock(irq); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | /* | 56 | /* |
| @@ -67,8 +67,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
| 67 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | 67 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); |
| 68 | printk("->handle_irq(): %p, ", desc->handle_irq); | 68 | printk("->handle_irq(): %p, ", desc->handle_irq); |
| 69 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | 69 | print_symbol("%s\n", (unsigned long)desc->handle_irq); |
| 70 | printk("->chip(): %p, ", desc->chip); | 70 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); |
| 71 | print_symbol("%s\n", (unsigned long)desc->chip); | 71 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); |
| 72 | printk("->action(): %p\n", desc->action); | 72 | printk("->action(): %p\n", desc->action); |
| 73 | if (desc->action) { | 73 | if (desc->action) { |
| 74 | printk("->action->handler(): %p, ", desc->action->handler); | 74 | printk("->action->handler(): %p, ", desc->action->handler); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9d91a3..4dfb19521d9f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
| 73 | { | 73 | { |
| 74 | struct irq_desc *desc = irq_to_desc(irq); | 74 | struct irq_desc *desc = irq_to_desc(irq); |
| 75 | 75 | ||
| 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || | 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || |
| 77 | !desc->chip->set_affinity) | 77 | !desc->irq_data.chip->set_affinity) |
| 78 | return 0; | 78 | return 0; |
| 79 | 79 | ||
| 80 | return 1; | 80 | return 1; |
| @@ -111,15 +111,15 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 111 | struct irq_desc *desc = irq_to_desc(irq); | 111 | struct irq_desc *desc = irq_to_desc(irq); |
| 112 | unsigned long flags; | 112 | unsigned long flags; |
| 113 | 113 | ||
| 114 | if (!desc->chip->set_affinity) | 114 | if (!desc->irq_data.chip->set_affinity) |
| 115 | return -EINVAL; | 115 | return -EINVAL; |
| 116 | 116 | ||
| 117 | raw_spin_lock_irqsave(&desc->lock, flags); | 117 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 118 | 118 | ||
| 119 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 119 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 120 | if (desc->status & IRQ_MOVE_PCNTXT) { | 120 | if (desc->status & IRQ_MOVE_PCNTXT) { |
| 121 | if (!desc->chip->set_affinity(irq, cpumask)) { | 121 | if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { |
| 122 | cpumask_copy(desc->affinity, cpumask); | 122 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 123 | irq_set_thread_affinity(desc); | 123 | irq_set_thread_affinity(desc); |
| 124 | } | 124 | } |
| 125 | } | 125 | } |
| @@ -128,8 +128,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 128 | cpumask_copy(desc->pending_mask, cpumask); | 128 | cpumask_copy(desc->pending_mask, cpumask); |
| 129 | } | 129 | } |
| 130 | #else | 130 | #else |
| 131 | if (!desc->chip->set_affinity(irq, cpumask)) { | 131 | if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { |
| 132 | cpumask_copy(desc->affinity, cpumask); | 132 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 133 | irq_set_thread_affinity(desc); | 133 | irq_set_thread_affinity(desc); |
| 134 | } | 134 | } |
| 135 | #endif | 135 | #endif |
| @@ -168,16 +168,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 168 | * one of the targets is online. | 168 | * one of the targets is online. |
| 169 | */ | 169 | */ |
| 170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
| 171 | if (cpumask_any_and(desc->affinity, cpu_online_mask) | 171 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) |
| 172 | < nr_cpu_ids) | 172 | < nr_cpu_ids) |
| 173 | goto set_affinity; | 173 | goto set_affinity; |
| 174 | else | 174 | else |
| 175 | desc->status &= ~IRQ_AFFINITY_SET; | 175 | desc->status &= ~IRQ_AFFINITY_SET; |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); | 178 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); |
| 179 | set_affinity: | 179 | set_affinity: |
| 180 | desc->chip->set_affinity(irq, desc->affinity); | 180 | desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity); |
| 181 | 181 | ||
| 182 | return 0; | 182 | return 0; |
| 183 | } | 183 | } |
| @@ -223,7 +223,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
| 223 | 223 | ||
| 224 | if (!desc->depth++) { | 224 | if (!desc->depth++) { |
| 225 | desc->status |= IRQ_DISABLED; | 225 | desc->status |= IRQ_DISABLED; |
| 226 | desc->chip->disable(irq); | 226 | desc->irq_data.chip->disable(irq); |
| 227 | } | 227 | } |
| 228 | } | 228 | } |
| 229 | 229 | ||
| @@ -313,7 +313,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 313 | * IRQ line is re-enabled. | 313 | * IRQ line is re-enabled. |
| 314 | * | 314 | * |
| 315 | * This function may be called from IRQ context only when | 315 | * This function may be called from IRQ context only when |
| 316 | * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 316 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
| 317 | */ | 317 | */ |
| 318 | void enable_irq(unsigned int irq) | 318 | void enable_irq(unsigned int irq) |
| 319 | { | 319 | { |
| @@ -336,8 +336,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
| 336 | struct irq_desc *desc = irq_to_desc(irq); | 336 | struct irq_desc *desc = irq_to_desc(irq); |
| 337 | int ret = -ENXIO; | 337 | int ret = -ENXIO; |
| 338 | 338 | ||
| 339 | if (desc->chip->set_wake) | 339 | if (desc->irq_data.chip->set_wake) |
| 340 | ret = desc->chip->set_wake(irq, on); | 340 | ret = desc->irq_data.chip->set_wake(irq, on); |
| 341 | 341 | ||
| 342 | return ret; | 342 | return ret; |
| 343 | } | 343 | } |
| @@ -432,7 +432,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 432 | unsigned long flags) | 432 | unsigned long flags) |
| 433 | { | 433 | { |
| 434 | int ret; | 434 | int ret; |
| 435 | struct irq_chip *chip = desc->chip; | 435 | struct irq_chip *chip = desc->irq_data.chip; |
| 436 | 436 | ||
| 437 | if (!chip || !chip->set_type) { | 437 | if (!chip || !chip->set_type) { |
| 438 | /* | 438 | /* |
| @@ -457,8 +457,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | 457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); |
| 458 | desc->status |= flags; | 458 | desc->status |= flags; |
| 459 | 459 | ||
| 460 | if (chip != desc->chip) | 460 | if (chip != desc->irq_data.chip) |
| 461 | irq_chip_set_defaults(desc->chip); | 461 | irq_chip_set_defaults(desc->irq_data.chip); |
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | return ret; | 464 | return ret; |
| @@ -528,7 +528,7 @@ again: | |||
| 528 | 528 | ||
| 529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { |
| 530 | desc->status &= ~IRQ_MASKED; | 530 | desc->status &= ~IRQ_MASKED; |
| 531 | desc->chip->unmask(irq); | 531 | desc->irq_data.chip->unmask(irq); |
| 532 | } | 532 | } |
| 533 | raw_spin_unlock_irq(&desc->lock); | 533 | raw_spin_unlock_irq(&desc->lock); |
| 534 | chip_bus_sync_unlock(irq, desc); | 534 | chip_bus_sync_unlock(irq, desc); |
| @@ -556,7 +556,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |||
| 556 | } | 556 | } |
| 557 | 557 | ||
| 558 | raw_spin_lock_irq(&desc->lock); | 558 | raw_spin_lock_irq(&desc->lock); |
| 559 | cpumask_copy(mask, desc->affinity); | 559 | cpumask_copy(mask, desc->irq_data.affinity); |
| 560 | raw_spin_unlock_irq(&desc->lock); | 560 | raw_spin_unlock_irq(&desc->lock); |
| 561 | 561 | ||
| 562 | set_cpus_allowed_ptr(current, mask); | 562 | set_cpus_allowed_ptr(current, mask); |
| @@ -657,7 +657,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 657 | if (!desc) | 657 | if (!desc) |
| 658 | return -EINVAL; | 658 | return -EINVAL; |
| 659 | 659 | ||
| 660 | if (desc->chip == &no_irq_chip) | 660 | if (desc->irq_data.chip == &no_irq_chip) |
| 661 | return -ENOSYS; | 661 | return -ENOSYS; |
| 662 | /* | 662 | /* |
| 663 | * Some drivers like serial.c use request_irq() heavily, | 663 | * Some drivers like serial.c use request_irq() heavily, |
| @@ -752,7 +752,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 752 | } | 752 | } |
| 753 | 753 | ||
| 754 | if (!shared) { | 754 | if (!shared) { |
| 755 | irq_chip_set_defaults(desc->chip); | 755 | irq_chip_set_defaults(desc->irq_data.chip); |
| 756 | 756 | ||
| 757 | init_waitqueue_head(&desc->wait_for_threads); | 757 | init_waitqueue_head(&desc->wait_for_threads); |
| 758 | 758 | ||
| @@ -779,7 +779,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 779 | if (!(desc->status & IRQ_NOAUTOEN)) { | 779 | if (!(desc->status & IRQ_NOAUTOEN)) { |
| 780 | desc->depth = 0; | 780 | desc->depth = 0; |
| 781 | desc->status &= ~IRQ_DISABLED; | 781 | desc->status &= ~IRQ_DISABLED; |
| 782 | desc->chip->startup(irq); | 782 | desc->irq_data.chip->startup(irq); |
| 783 | } else | 783 | } else |
| 784 | /* Undo nested disables: */ | 784 | /* Undo nested disables: */ |
| 785 | desc->depth = 1; | 785 | desc->depth = 1; |
| @@ -912,17 +912,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 912 | 912 | ||
| 913 | /* Currently used only by UML, might disappear one day: */ | 913 | /* Currently used only by UML, might disappear one day: */ |
| 914 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 914 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
| 915 | if (desc->chip->release) | 915 | if (desc->irq_data.chip->release) |
| 916 | desc->chip->release(irq, dev_id); | 916 | desc->irq_data.chip->release(irq, dev_id); |
| 917 | #endif | 917 | #endif |
| 918 | 918 | ||
| 919 | /* If this was the last handler, shut down the IRQ line: */ | 919 | /* If this was the last handler, shut down the IRQ line: */ |
| 920 | if (!desc->action) { | 920 | if (!desc->action) { |
| 921 | desc->status |= IRQ_DISABLED; | 921 | desc->status |= IRQ_DISABLED; |
| 922 | if (desc->chip->shutdown) | 922 | if (desc->irq_data.chip->shutdown) |
| 923 | desc->chip->shutdown(irq); | 923 | desc->irq_data.chip->shutdown(irq); |
| 924 | else | 924 | else |
| 925 | desc->chip->disable(irq); | 925 | desc->irq_data.chip->disable(irq); |
| 926 | } | 926 | } |
| 927 | 927 | ||
| 928 | #ifdef CONFIG_SMP | 928 | #ifdef CONFIG_SMP |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 241962280836..f923c37e651a 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
| @@ -24,7 +24,7 @@ void move_masked_irq(int irq) | |||
| 24 | if (unlikely(cpumask_empty(desc->pending_mask))) | 24 | if (unlikely(cpumask_empty(desc->pending_mask))) |
| 25 | return; | 25 | return; |
| 26 | 26 | ||
| 27 | if (!desc->chip->set_affinity) | 27 | if (!desc->irq_data.chip->set_affinity) |
| 28 | return; | 28 | return; |
| 29 | 29 | ||
| 30 | assert_raw_spin_locked(&desc->lock); | 30 | assert_raw_spin_locked(&desc->lock); |
| @@ -43,8 +43,8 @@ void move_masked_irq(int irq) | |||
| 43 | */ | 43 | */ |
| 44 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) | 44 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) |
| 45 | < nr_cpu_ids)) | 45 | < nr_cpu_ids)) |
| 46 | if (!desc->chip->set_affinity(irq, desc->pending_mask)) { | 46 | if (!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) { |
| 47 | cpumask_copy(desc->affinity, desc->pending_mask); | 47 | cpumask_copy(desc->irq_data.affinity, desc->pending_mask); |
| 48 | irq_set_thread_affinity(desc); | 48 | irq_set_thread_affinity(desc); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| @@ -61,8 +61,8 @@ void move_native_irq(int irq) | |||
| 61 | if (unlikely(desc->status & IRQ_DISABLED)) | 61 | if (unlikely(desc->status & IRQ_DISABLED)) |
| 62 | return; | 62 | return; |
| 63 | 63 | ||
| 64 | desc->chip->mask(irq); | 64 | desc->irq_data.chip->mask(irq); |
| 65 | move_masked_irq(irq); | 65 | move_masked_irq(irq); |
| 66 | desc->chip->unmask(irq); | 66 | desc->irq_data.chip->unmask(irq); |
| 67 | } | 67 | } |
| 68 | 68 | ||
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 65d3845665ac..e7f1f16402c1 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
| @@ -44,7 +44,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | |||
| 44 | return false; | 44 | return false; |
| 45 | } | 45 | } |
| 46 | raw_spin_lock_init(&desc->lock); | 46 | raw_spin_lock_init(&desc->lock); |
| 47 | desc->node = node; | 47 | desc->irq_data.node = node; |
| 48 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 48 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
| 49 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); | 49 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); |
| 50 | init_copy_desc_masks(old_desc, desc); | 50 | init_copy_desc_masks(old_desc, desc); |
| @@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
| 66 | unsigned int irq; | 66 | unsigned int irq; |
| 67 | unsigned long flags; | 67 | unsigned long flags; |
| 68 | 68 | ||
| 69 | irq = old_desc->irq; | 69 | irq = old_desc->irq_data.irq; |
| 70 | 70 | ||
| 71 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); | 71 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); |
| 72 | 72 | ||
| @@ -109,10 +109,10 @@ out_unlock: | |||
| 109 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | 109 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) |
| 110 | { | 110 | { |
| 111 | /* those static or target node is -1, do not move them */ | 111 | /* those static or target node is -1, do not move them */ |
| 112 | if (desc->irq < NR_IRQS_LEGACY || node == -1) | 112 | if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1) |
| 113 | return desc; | 113 | return desc; |
| 114 | 114 | ||
| 115 | if (desc->node != node) | 115 | if (desc->irq_data.node != node) |
| 116 | desc = __real_move_irq_desc(desc, node); | 116 | desc = __real_move_irq_desc(desc, node); |
| 117 | 117 | ||
| 118 | return desc; | 118 | return desc; |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee540bd2..9b0da94b5b2b 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir; | |||
| 21 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 21 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
| 22 | { | 22 | { |
| 23 | struct irq_desc *desc = irq_to_desc((long)m->private); | 23 | struct irq_desc *desc = irq_to_desc((long)m->private); |
| 24 | const struct cpumask *mask = desc->affinity; | 24 | const struct cpumask *mask = desc->irq_data.affinity; |
| 25 | 25 | ||
| 26 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 26 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 27 | if (desc->status & IRQ_MOVE_PENDING) | 27 | if (desc->status & IRQ_MOVE_PENDING) |
| @@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
| 65 | cpumask_var_t new_value; | 65 | cpumask_var_t new_value; |
| 66 | int err; | 66 | int err; |
| 67 | 67 | ||
| 68 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || | 68 | if (!irq_to_desc(irq)->irq_data.chip->set_affinity || no_irq_affinity || |
| 69 | irq_balancing_disabled(irq)) | 69 | irq_balancing_disabled(irq)) |
| 70 | return -EIO; | 70 | return -EIO; |
| 71 | 71 | ||
| @@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v) | |||
| 185 | { | 185 | { |
| 186 | struct irq_desc *desc = irq_to_desc((long) m->private); | 186 | struct irq_desc *desc = irq_to_desc((long) m->private); |
| 187 | 187 | ||
| 188 | seq_printf(m, "%d\n", desc->node); | 188 | seq_printf(m, "%d\n", desc->irq_data.node); |
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| @@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
| 269 | { | 269 | { |
| 270 | char name [MAX_NAMELEN]; | 270 | char name [MAX_NAMELEN]; |
| 271 | 271 | ||
| 272 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) | 272 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) |
| 273 | return; | 273 | return; |
| 274 | 274 | ||
| 275 | memset(name, 0, MAX_NAMELEN); | 275 | memset(name, 0, MAX_NAMELEN); |
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 090c3763f3a2..47c56a097928 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
| @@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
| 60 | /* | 60 | /* |
| 61 | * Make sure the interrupt is enabled, before resending it: | 61 | * Make sure the interrupt is enabled, before resending it: |
| 62 | */ | 62 | */ |
| 63 | desc->chip->enable(irq); | 63 | desc->irq_data.chip->enable(irq); |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * We do not resend level type interrupts. Level type | 66 | * We do not resend level type interrupts. Level type |
| @@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
| 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { |
| 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; |
| 72 | 72 | ||
| 73 | if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { | 73 | if (!desc->irq_data.chip->retrigger || |
| 74 | !desc->irq_data.chip->retrigger(irq)) { | ||
| 74 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 75 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
| 75 | /* Set it pending and activate the softirq: */ | 76 | /* Set it pending and activate the softirq: */ |
| 76 | set_bit(irq, irqs_resend); | 77 | set_bit(irq, irqs_resend); |
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 89fb90ae534f..36c2c9289e2b 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
| @@ -78,8 +78,8 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
| 78 | * If we did actual work for the real IRQ line we must let the | 78 | * If we did actual work for the real IRQ line we must let the |
| 79 | * IRQ controller clean up too | 79 | * IRQ controller clean up too |
| 80 | */ | 80 | */ |
| 81 | if (work && desc->chip && desc->chip->end) | 81 | if (work && desc->irq_data.chip && desc->irq_data.chip->end) |
| 82 | desc->chip->end(irq); | 82 | desc->irq_data.chip->end(irq); |
| 83 | raw_spin_unlock(&desc->lock); | 83 | raw_spin_unlock(&desc->lock); |
| 84 | 84 | ||
| 85 | return ok; | 85 | return ok; |
| @@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
| 254 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); | 254 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); |
| 255 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; | 255 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; |
| 256 | desc->depth++; | 256 | desc->depth++; |
| 257 | desc->chip->disable(irq); | 257 | desc->irq_data.chip->disable(irq); |
| 258 | 258 | ||
| 259 | mod_timer(&poll_spurious_irq_timer, | 259 | mod_timer(&poll_spurious_irq_timer, |
| 260 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 260 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
