author     Thomas Gleixner <tglx@linutronix.de>   2011-02-08 11:28:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>   2011-02-19 06:58:20 -0500
commit     876dbd4cc1b35c1a4cb96a2be1d43ea0eabce3b4
tree       9be1e7e4cd4a4c9fadd98a9ac637020417215521
parent     2bdd10558c8d93009cb6c32ce9e30800fbb08add
genirq: Mirror irq trigger type bits in irq_data.state
irq_data is the data structure that chip functions are handed. Also allow
them to signal the core code that they have updated the flags in
irq_data.state themselves by returning IRQ_SET_MASK_OK_NOCOPY. The default
behaviour is unchanged.
The type bits should be accessed via:
val = irqd_get_trigger_type(irqdata);
and
irqd_set_trigger_type(irqdata, val);
Coders who access them directly will be tracked down and slapped with
stinking trouts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
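
A minimal sketch of how a chip driver might use this, assuming a
hypothetical "mychip" controller (the register-write helper and names are
made up; only the irqd_set_trigger_type()/IRQ_SET_MASK_OK_NOCOPY usage
reflects the patch):

#include <linux/errno.h>
#include <linux/irq.h>

/* Hypothetical hardware helper - stands in for driver specific register code */
static void mychip_write_sense(unsigned int irq, u32 sense);

static int mychip_irq_set_type(struct irq_data *d, unsigned int type)
{
	u32 sense = type & IRQ_TYPE_SENSE_MASK;

	switch (sense) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_LEVEL_HIGH:
		/* Program the (hypothetical) hardware sense register */
		mychip_write_sense(d->irq, sense);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * The trigger bits in irq_data.state are updated here, so tell
	 * the core not to copy 'type' into the state a second time.
	 */
	irqd_set_trigger_type(d, sense);
	return IRQ_SET_MASK_OK_NOCOPY;
}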
-rw-r--r--  include/linux/irq.h    | 26
-rw-r--r--  kernel/irq/chip.c      |  5
-rw-r--r--  kernel/irq/manage.c    | 44
-rw-r--r--  kernel/irq/resend.c    |  2
-rw-r--r--  kernel/irq/settings.h  | 30
5 files changed, 87 insertions(+), 20 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8da1782ecfca..be73c0a3c19d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -46,7 +46,9 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
 #define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
 #define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
+#define IRQ_TYPE_LEVEL_MASK	(IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)
 #define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
+
 #define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */
 
 /* Internal flags */
@@ -131,17 +133,20 @@ struct irq_data {
 /*
  * Bit masks for irq_data.state
  *
+ * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
  * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
  * IRQD_PER_CPU		- Interrupt is per cpu
  * IRQD_AFFINITY_SET		- Interrupt affinity was set
+ * IRQD_LEVEL			- Interrupt is level triggered
  */
 enum {
-	/* Bit 0 - 7 reserved for TYPE will use later */
+	IRQD_TRIGGER_MASK		= 0xf,
 	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
 	IRQD_NO_BALANCING		= (1 << 10),
 	IRQD_PER_CPU			= (1 << 11),
 	IRQD_AFFINITY_SET		= (1 << 12),
+	IRQD_LEVEL			= (1 << 13),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -164,6 +169,25 @@ static inline bool irqd_affinity_was_set(struct irq_data *d)
 	return d->state_use_accessors & IRQD_AFFINITY_SET;
 }
 
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_LEVEL;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
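With the accessors above, code that holds an irq_data pointer can query the
mirrored trigger state instead of poking desc->status directly. A small
hypothetical sketch (the function name is made up for illustration):

#include <linux/irq.h>

/* Hypothetical helper: decide whether a lost interrupt needs a software resend */
static bool mychip_needs_sw_resend(struct irq_data *d)
{
	/* Level triggered interrupts are replayed by the hardware itself */
	if (irqd_is_level_type(d))
		return false;

	/* Edge triggered ones are not; check the mirrored trigger type */
	return irqd_get_trigger_type(d) & IRQ_TYPE_EDGE_BOTH;
}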
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b8aa3dfe8301..9c9b573a718e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -710,11 +710,14 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 
 	irq_settings_clr_and_set(desc, clr, set);
 
-	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU);
+	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+		   IRQD_TRIGGER_MASK | IRQD_LEVEL);
 	if (irq_settings_has_no_balance_set(desc))
 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	if (irq_settings_is_per_cpu(desc))
 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
 
+	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8246afc81956..9ae758ed8e66 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -567,23 +567,32 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		return 0;
 	}
 
+	flags &= IRQ_TYPE_SENSE_MASK;
 	/* caller masked out all except trigger mode flags */
 	ret = chip->irq_set_type(&desc->irq_data, flags);
 
-	if (ret)
-		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
-		       flags, irq, chip->irq_set_type);
-	else {
-		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-			flags |= IRQ_LEVEL;
-		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-		desc->status |= flags;
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+		irqd_set(&desc->irq_data, flags);
+
+	case IRQ_SET_MASK_OK_NOCOPY:
+		flags = irqd_get_trigger_type(&desc->irq_data);
+		irq_settings_set_trigger_mask(desc, flags);
+		irqd_clear(&desc->irq_data, IRQD_LEVEL);
+		irq_settings_clr_level(desc);
+		if (flags & IRQ_TYPE_LEVEL_MASK) {
+			irq_settings_set_level(desc);
+			irqd_set(&desc->irq_data, IRQD_LEVEL);
+		}
 
 		if (chip != desc->irq_data.chip)
 			irq_chip_set_defaults(desc->irq_data.chip);
+		return 0;
+	default:
+		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+		       flags, irq, chip->irq_set_type);
 	}
-
 	return ret;
 }
 
@@ -923,13 +932,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		/* Set default affinity mask once everything is setup */
 		setup_affinity(irq, desc, mask);
 
-	} else if ((new->flags & IRQF_TRIGGER_MASK)
-			&& (new->flags & IRQF_TRIGGER_MASK)
-				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
-		/* hope the handler works with the actual trigger mode... */
-		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-				(int)(new->flags & IRQF_TRIGGER_MASK));
+	} else if (new->flags & IRQF_TRIGGER_MASK) {
+		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+		unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+		if (nmsk != omsk)
+			/* hope the handler works with current trigger mode */
+			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+				   irq, nmsk, omsk);
 	}
 
 	new->irq = irq;
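The trigger mask compared in the hunk above comes from the IRQF_TRIGGER_*
bits that drivers pass to request_irq(); when a second handler is added to a
shared line with a different trigger mode, __setup_irq() now warns using
irq_settings_get_trigger_mask(). A hypothetical requester (device names made
up) might look like:

#include <linux/interrupt.h>

static irqreturn_t mydev_irq_handler(int irq, void *dev_id)
{
	/* Acknowledge and handle the (hypothetical) device interrupt */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(unsigned int irq, void *dev)
{
	/* Request a falling-edge, shared interrupt line */
	return request_irq(irq, mydev_irq_handler,
			   IRQF_TRIGGER_FALLING | IRQF_SHARED,
			   "mydev", dev);
}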
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index ff1fea060014..ad683a99b1ec 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	 * interrupts are resent by hardware when they are still
 	 * active.
 	 */
-	if (desc->status & IRQ_LEVEL)
+	if (irq_settings_is_level(desc))
 		return;
 	if (desc->istate & IRQS_REPLAY)
 		return;
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index da5acb446b1c..2201f2aaa9a0 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -5,6 +5,7 @@
 enum {
 	_IRQ_DEFAULT_INIT_FLAGS	= IRQ_DEFAULT_INIT_FLAGS,
 	_IRQ_PER_CPU		= IRQ_PER_CPU,
+	_IRQ_LEVEL		= IRQ_LEVEL,
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
@@ -31,6 +32,8 @@ enum {
 #define IRQ_NO_BALANCING	GOT_YOU_MORON
 #undef IRQ_AFFINITY_SET
 #define IRQ_AFFINITY_SET	GOT_YOU_MORON
+#undef IRQ_LEVEL
+#define IRQ_LEVEL		GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -60,3 +63,30 @@ static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
 {
 	return desc->status & _IRQ_NO_BALANCING;
 }
+
+static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
+{
+	return desc->status & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline void
+irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
+{
+	desc->status &= ~IRQ_TYPE_SENSE_MASK;
+	desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+}
+
+static inline bool irq_settings_is_level(struct irq_desc *desc)
+{
+	return desc->status & _IRQ_LEVEL;
+}
+
+static inline void irq_settings_clr_level(struct irq_desc *desc)
+{
+	desc->status &= ~_IRQ_LEVEL;
+}
+
+static inline void irq_settings_set_level(struct irq_desc *desc)
+{
+	desc->status |= _IRQ_LEVEL;
+}