author      Thomas Gleixner <tglx@linutronix.de>    2011-03-28 08:10:52 -0400
committer   Thomas Gleixner <tglx@linutronix.de>    2011-03-28 10:55:10 -0400
commit      32f4125ebffee4f3c4dbc6a437fc656129eb9e60 (patch)
tree        d64c6bb7ba40c33734896303734416ea5b4f3290 /kernel
parent      c2d0c555c22242c3a76e366074c4d83ef9fa3b8c (diff)
genirq: Move INPROGRESS, MASKED and DISABLED state flags to irq_data
We really need these flags for some of the interrupt chips. Move them
from the internal state to irq_data and provide proper accessors.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Daney <ddaney@caviumnetworks.com>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/irq/chip.c       | 40
-rw-r--r--   kernel/irq/debug.h      | 10
-rw-r--r--   kernel/irq/handle.c     |  4
-rw-r--r--   kernel/irq/internals.h  |  6
-rw-r--r--   kernel/irq/irqdesc.c    |  2
-rw-r--r--   kernel/irq/manage.c     | 30
-rw-r--r--   kernel/irq/migration.c  |  4
-rw-r--r--   kernel/irq/spurious.c   | 10
8 files changed, 49 insertions(+), 57 deletions(-)
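
The irq_data side of this change (the IRQD_IRQ_* bits and the irqd_irq_*() accessors in include/linux/irq.h) falls outside the kernel/-limited diffstat above, so it is not shown here. Below is a minimal sketch of the pattern the hunks rely on; the bit values and the state_use_accessors field name are assumptions for illustration, not taken from this diff:

/*
 * Sketch only: the real definitions live in include/linux/irq.h.
 * Bit positions and the state_use_accessors field name are assumed.
 */
enum {
        IRQD_IRQ_DISABLED       = (1 << 16),    /* replaces IRQS_DISABLED */
        IRQD_IRQ_MASKED         = (1 << 17),    /* replaces IRQS_MASKED */
        IRQD_IRQ_INPROGRESS     = (1 << 18),    /* replaces IRQS_INPROGRESS */
};

static inline bool irqd_irq_disabled(struct irq_data *d)
{
        return d->state_use_accessors & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
        return d->state_use_accessors & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
        return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}

The irqd_set()/irqd_clear() writers used in the hunks below are internal to kernel/irq and, as in these hunks, are invoked with desc->lock held.
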
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9283d3300ea9..e00bdc56269f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -140,27 +140,25 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-        desc->istate &= ~IRQS_DISABLED;
         irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
         irq_compat_clr_disabled(desc);
 }
 
 static void irq_state_set_disabled(struct irq_desc *desc)
 {
-        desc->istate |= IRQS_DISABLED;
         irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
         irq_compat_set_disabled(desc);
 }
 
 static void irq_state_clr_masked(struct irq_desc *desc)
 {
-        desc->istate &= ~IRQS_MASKED;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
         irq_compat_clr_masked(desc);
 }
 
 static void irq_state_set_masked(struct irq_desc *desc)
 {
-        desc->istate |= IRQS_MASKED;
+        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
         irq_compat_set_masked(desc);
 }
 
@@ -380,11 +378,11 @@ void handle_nested_irq(unsigned int irq)
         kstat_incr_irqs_this_cpu(irq, desc);
 
         action = desc->action;
-        if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         irq_compat_set_progress(desc);
-        desc->istate |= IRQS_INPROGRESS;
+        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         raw_spin_unlock_irq(&desc->lock);
 
         action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -392,7 +390,7 @@ void handle_nested_irq(unsigned int irq)
         note_interrupt(irq, desc, action_ret);
 
         raw_spin_lock_irq(&desc->lock);
-        desc->istate &= ~IRQS_INPROGRESS;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         irq_compat_clr_progress(desc);
 
 out_unlock:
@@ -424,14 +422,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
         raw_spin_lock(&desc->lock);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out_unlock;
 
         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
         kstat_incr_irqs_this_cpu(irq, desc);
 
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         handle_irq_event(desc);
@@ -456,7 +454,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         raw_spin_lock(&desc->lock);
         mask_ack_irq(desc);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out_unlock;
 
@@ -467,12 +465,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
          * If its disabled or no action available
          * keep it masked and get out of here
          */
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                 goto out_unlock;
 
         handle_irq_event(desc);
 
-        if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
+        if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
                 unmask_irq(desc);
 out_unlock:
         raw_spin_unlock(&desc->lock);
@@ -504,7 +502,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
         raw_spin_lock(&desc->lock);
 
-        if (unlikely(desc->istate & IRQS_INPROGRESS))
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                 if (!irq_check_poll(desc))
                         goto out;
 
@@ -515,7 +513,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
          * If its disabled or no action available
          * then mask it and get out of here:
          */
-        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                 irq_compat_set_pending(desc);
                 desc->istate |= IRQS_PENDING;
                 mask_irq(desc);
@@ -566,8 +564,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
          * we shouldn't process the IRQ. Mark it pending, handle
          * the necessary masking and go out
          */
-        if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
-                    !desc->action))) {
+        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                 if (!irq_check_poll(desc)) {
                         irq_compat_set_pending(desc);
                         desc->istate |= IRQS_PENDING;
@@ -592,15 +590,15 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                  * Renable it, if it was not disabled in meantime.
                  */
                 if (unlikely(desc->istate & IRQS_PENDING)) {
-                        if (!(desc->istate & IRQS_DISABLED) &&
-                            (desc->istate & IRQS_MASKED))
+                        if (!irqd_irq_disabled(&desc->irq_data) &&
+                            irqd_irq_masked(&desc->irq_data))
                                 unmask_irq(desc);
                 }
 
                 handle_irq_event(desc);
 
         } while ((desc->istate & IRQS_PENDING) &&
-                !(desc->istate & IRQS_DISABLED));
+                 !irqd_irq_disabled(&desc->irq_data));
 
 out_unlock:
         raw_spin_unlock(&desc->lock);
@@ -720,7 +718,7 @@ void irq_cpu_online(void)
                 chip = irq_data_get_irq_chip(&desc->irq_data);
                 if (chip && chip->irq_cpu_online &&
                     (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
-                     !(desc->istate & IRQS_DISABLED)))
+                     !irqd_irq_disabled(&desc->irq_data)))
                         chip->irq_cpu_online(&desc->irq_data);
 
                 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -750,7 +748,7 @@ void irq_cpu_offline(void)
                 chip = irq_data_get_irq_chip(&desc->irq_data);
                 if (chip && chip->irq_cpu_offline &&
                     (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
-                     !(desc->istate & IRQS_DISABLED)))
+                     !irqd_irq_disabled(&desc->irq_data)))
                         chip->irq_cpu_offline(&desc->irq_data);
 
                 raw_spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index d1a33b7fa61d..a0bd875ba3d5 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -6,6 +6,8 @@
 
 #define P(f) if (desc->status & f) printk("%14s set\n", #f)
 #define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+/* FIXME */
+#define PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -28,13 +30,15 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
         P(IRQ_NOAUTOEN);
 
         PS(IRQS_AUTODETECT);
-        PS(IRQS_INPROGRESS);
         PS(IRQS_REPLAY);
         PS(IRQS_WAITING);
-        PS(IRQS_DISABLED);
         PS(IRQS_PENDING);
-        PS(IRQS_MASKED);
+
+        PD(IRQS_INPROGRESS);
+        PD(IRQS_DISABLED);
+        PD(IRQS_MASKED);
 }
 
 #undef P
 #undef PS
+#undef PD
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 517561fc7317..60fd5cd75c77 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -178,13 +178,13 @@ irqreturn_t handle_irq_event(struct irq_desc *desc)
         irq_compat_clr_pending(desc);
         desc->istate &= ~IRQS_PENDING;
         irq_compat_set_progress(desc);
-        desc->istate |= IRQS_INPROGRESS;
+        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         raw_spin_unlock(&desc->lock);
 
         ret = handle_irq_event_percpu(desc, action);
 
         raw_spin_lock(&desc->lock);
-        desc->istate &= ~IRQS_INPROGRESS;
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         irq_compat_clr_progress(desc);
         return ret;
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6c6ec9a49027..6b8b9713e28d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -44,26 +44,20 @@ enum {
  * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
  *                          detection
  * IRQS_POLL_INPROGRESS - polling in progress
- * IRQS_INPROGRESS - Interrupt in progress
  * IRQS_ONESHOT - irq is not unmasked in primary handler
  * IRQS_REPLAY - irq is replayed
  * IRQS_WAITING - irq is waiting
- * IRQS_DISABLED - irq is disabled
  * IRQS_PENDING - irq is pending and replayed later
- * IRQS_MASKED - irq is masked
  * IRQS_SUSPENDED - irq is suspended
  */
 enum {
         IRQS_AUTODETECT = 0x00000001,
         IRQS_SPURIOUS_DISABLED = 0x00000002,
         IRQS_POLL_INPROGRESS = 0x00000008,
-        IRQS_INPROGRESS = 0x00000010,
         IRQS_ONESHOT = 0x00000020,
         IRQS_REPLAY = 0x00000040,
         IRQS_WAITING = 0x00000080,
-        IRQS_DISABLED = 0x00000100,
         IRQS_PENDING = 0x00000200,
-        IRQS_MASKED = 0x00000400,
         IRQS_SUSPENDED = 0x00000800,
 };
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 96c3268a509d..2c039c9b9383 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -81,7 +81,6 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
         desc->irq_data.msi_desc = NULL;
         irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
         irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
-        desc->istate = IRQS_DISABLED;
         desc->handle_irq = handle_bad_irq;
         desc->depth = 1;
         desc->irq_count = 0;
@@ -239,7 +238,6 @@ int __init early_irq_init(void)
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
         [0 ... NR_IRQS-1] = {
-                .istate = IRQS_DISABLED,
                 .handle_irq = handle_bad_irq,
                 .depth = 1,
                 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3d151fd762ad..6e8acb755993 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -41,7 +41,7 @@ early_param("threadirqs", setup_forced_irqthreads);
 void synchronize_irq(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        unsigned int state;
+        bool inprogress;
 
         if (!desc)
                 return;
@@ -53,16 +53,16 @@ void synchronize_irq(unsigned int irq)
                  * Wait until we're out of the critical section. This might
                  * give the wrong answer due to the lack of memory barriers.
                  */
-                while (desc->istate & IRQS_INPROGRESS)
+                while (irqd_irq_inprogress(&desc->irq_data))
                         cpu_relax();
 
                 /* Ok, that indicated we're done: double-check carefully. */
                 raw_spin_lock_irqsave(&desc->lock, flags);
-                state = desc->istate;
+                inprogress = irqd_irq_inprogress(&desc->irq_data);
                 raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                 /* Oops, that failed? */
-        } while (state & IRQS_INPROGRESS);
+        } while (inprogress);
 
         /*
          * We made sure that no hardirq handler is running. Now verify
@@ -563,9 +563,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
         flags &= IRQ_TYPE_SENSE_MASK;
 
         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
-                if (!(desc->istate & IRQS_MASKED))
+                if (!irqd_irq_masked(&desc->irq_data))
                         mask_irq(desc);
-                if (!(desc->istate & IRQS_DISABLED))
+                if (!irqd_irq_disabled(&desc->irq_data))
                         unmask = 1;
         }
 
@@ -663,7 +663,7 @@ again:
          * irq_wake_thread(). See the comment there which explains the
          * serialization.
          */
-        if (unlikely(desc->istate & IRQS_INPROGRESS)) {
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                 raw_spin_unlock_irq(&desc->lock);
                 chip_bus_sync_unlock(desc);
                 cpu_relax();
@@ -680,12 +680,10 @@ again:
 
         desc->threads_oneshot &= ~action->thread_mask;
 
-        if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
-            (desc->istate & IRQS_MASKED)) {
-                irq_compat_clr_masked(desc);
-                desc->istate &= ~IRQS_MASKED;
-                desc->irq_data.chip->irq_unmask(&desc->irq_data);
-        }
+        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+            irqd_irq_masked(&desc->irq_data))
+                unmask_irq(desc);
+
 out_unlock:
         raw_spin_unlock_irq(&desc->lock);
         chip_bus_sync_unlock(desc);
@@ -779,7 +777,7 @@ static int irq_thread(void *data)
                 atomic_inc(&desc->threads_active);
 
                 raw_spin_lock_irq(&desc->lock);
-                if (unlikely(desc->istate & IRQS_DISABLED)) {
+                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                         /*
                          * CHECKME: We might need a dedicated
                          * IRQ_THREAD_PENDING flag here, which
@@ -997,8 +995,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         }
 
         desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
-                          IRQS_INPROGRESS | IRQS_ONESHOT | \
-                          IRQS_WAITING);
+                          IRQS_ONESHOT | IRQS_WAITING);
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 
         if (new->flags & IRQF_PERCPU) {
                 irqd_set(&desc->irq_data, IRQD_PER_CPU);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index ec4806d4778b..5e81d34b08d6 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -66,7 +66,7 @@ void irq_move_irq(struct irq_data *idata)
         if (likely(!irqd_is_setaffinity_pending(idata)))
                 return;
 
-        if (unlikely(desc->istate & IRQS_DISABLED))
+        if (unlikely(irqd_irq_disabled(idata)))
                 return;
 
         /*
@@ -74,7 +74,7 @@ void irq_move_irq(struct irq_data *idata)
          * threaded interrupt with ONESHOT set, we can end up with an
          * interrupt storm.
          */
-        masked = desc->istate & IRQS_MASKED;
+        masked = irqd_irq_masked(idata);
         if (!masked)
                 idata->chip->irq_mask(idata);
         irq_move_masked_irq(idata);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd586ebf9c8c..cd424cdf17fc 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -45,12 +45,12 @@ bool irq_wait_for_poll(struct irq_desc *desc)
 #ifdef CONFIG_SMP
         do {
                 raw_spin_unlock(&desc->lock);
-                while (desc->istate & IRQS_INPROGRESS)
+                while (irqd_irq_inprogress(&desc->irq_data))
                         cpu_relax();
                 raw_spin_lock(&desc->lock);
-        } while (desc->istate & IRQS_INPROGRESS);
+        } while (irqd_irq_inprogress(&desc->irq_data));
         /* Might have been disabled in meantime */
-        return !(desc->istate & IRQS_DISABLED) && desc->action;
+        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
 #else
         return false;
 #endif
@@ -75,7 +75,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
          * Do not poll disabled interrupts unless the spurious
          * disabled poller asks explicitely.
          */
-        if ((desc->istate & IRQS_DISABLED) && !force)
+        if (irqd_irq_disabled(&desc->irq_data) && !force)
                 goto out;
 
         /*
@@ -88,7 +88,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
                 goto out;
 
         /* Already running on another processor */
-        if (desc->istate & IRQS_INPROGRESS) {
+        if (irqd_irq_inprogress(&desc->irq_data)) {
                 /*
                  * Already running: If it is shared get the other
                  * CPU to go looking for our mystery interrupt too