Diffstat (limited to 'include')
 include/linux/dmar.h      |  10
 include/linux/htirq.h     |   5
 include/linux/interrupt.h |   3
 include/linux/irq.h       | 447
 include/linux/irqdesc.h   | 159
 include/linux/irqnr.h     |   5
 include/linux/lockdep.h   |   8
 include/linux/msi.h       |  13
 8 files changed, 349 insertions(+), 301 deletions(-)
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d7cecc90ed34..51651b76d40f 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -106,6 +106,7 @@ struct irte {
 		__u64 high;
 	};
 };
+
 #ifdef CONFIG_INTR_REMAP
 extern int intr_remapping_enabled;
 extern int intr_remapping_supported(void);
@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
 extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
 	u16 sub_handle);
 extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
-extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
-extern int flush_irte(int irq);
 extern int free_irte(int irq);
 
-extern int irq_remapped(int irq);
 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
 extern struct intel_iommu *map_ioapic_to_ir(int apic);
 extern struct intel_iommu *map_hpet_to_ir(u8 id);
@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 	return 0;
 }
 
-#define irq_remapped(irq) (0)
 #define enable_intr_remapping(mode) (-1)
 #define disable_intr_remapping() (0)
 #define reenable_intr_remapping(mode) (0)
@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 /* Can't use the common MSI interrupt functions
  * since DMAR is not a pci device
  */
-extern void dmar_msi_unmask(unsigned int irq);
-extern void dmar_msi_mask(unsigned int irq);
+struct irq_data;
+extern void dmar_msi_unmask(struct irq_data *data);
+extern void dmar_msi_mask(struct irq_data *data);
 extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index c96ea46737d0..70a1dbbf2093 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -9,8 +9,9 @@ struct ht_irq_msg {
 /* Helper functions.. */
 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(unsigned int irq);
-void unmask_ht_irq(unsigned int irq);
+struct irq_data;
+void mask_ht_irq(struct irq_data *data);
+void unmask_ht_irq(struct irq_data *data);
 
 /* The arch hook for getting things started */
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
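The pattern in the dmar.h and htirq.h hunks repeats throughout the series: callbacks that used to be keyed by a bare interrupt number now receive the struct irq_data the core already resolved. A minimal before/after sketch of such a conversion; struct foo_pic, FOO_MASK_SET and the foo_* names are invented for illustration, not part of this patch:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

/* Hypothetical controller state, reached through chip_data. */
struct foo_pic {
	void __iomem *base;
	unsigned int first_irq;
};

#define FOO_MASK_SET	0x04	/* assumed register offset */

/* Old style: the callback gets only the irq number and must look its
 * state up again through the descriptor. */
static void foo_mask(unsigned int irq)
{
	struct foo_pic *pic = get_irq_chip_data(irq);

	writel(BIT(irq - pic->first_irq), pic->base + FOO_MASK_SET);
}

/* New style: everything the method needs travels in one pointer. */
static void foo_irq_mask(struct irq_data *data)
{
	struct foo_pic *pic = irq_data_get_irq_chip_data(data);

	writel(BIT(data->irq - pic->first_irq), pic->base + FOO_MASK_SET);
}

The gain is that chip methods no longer have to re-resolve irq_to_desc() internally; the number is still available as data->irq for hardware that needs it.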
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 531495db1708..414328577ced 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -647,11 +647,8 @@ static inline void init_irq_proc(void)
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
 
-struct irq_desc;
-
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int node);
 
 #endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c03243ad84b4..e9639115dff1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
 #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
 
+#define IRQF_MODIFY_MASK \
+	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
 # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
 #endif
 
-struct proc_dir_entry;
 struct msi_desc;
 
 /**
+ * struct irq_data - per irq and irq chip data passed down to chip functions
+ * @irq: interrupt number
+ * @node: node index useful for balancing
+ * @chip: low level interrupt hardware access
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @chip_data: platform-specific per-chip private data for the chip
+ *	methods, to allow shared chip implementations
+ * @msi_desc: MSI descriptor
+ * @affinity: IRQ affinity on SMP
+ *
+ * The fields here need to overlay the ones in irq_desc until we
+ * cleaned up the direct references and switched everything over to
+ * irq_data.
+ */
+struct irq_data {
+	unsigned int irq;
+	unsigned int node;
+	struct irq_chip *chip;
+	void *handler_data;
+	void *chip_data;
+	struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
+	cpumask_var_t affinity;
+#endif
+};
+
+/**
  * struct irq_chip - hardware interrupt chip descriptor
  *
  * @name: name for /proc/interrupts
- * @startup: start up the interrupt (defaults to ->enable if NULL)
- * @shutdown: shut down the interrupt (defaults to ->disable if NULL)
- * @enable: enable the interrupt (defaults to chip->unmask if NULL)
- * @disable: disable the interrupt
- * @ack: start of a new interrupt
- * @mask: mask an interrupt source
- * @mask_ack: ack and mask an interrupt source
- * @unmask: unmask an interrupt source
- * @eoi: end of interrupt - chip level
- * @end: end of interrupt - flow level
- * @set_affinity: set the CPU affinity on SMP machines
- * @retrigger: resend an IRQ to the CPU
- * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
- * @set_wake: enable/disable power-management wake-on of an IRQ
+ * @startup: deprecated, replaced by irq_startup
+ * @shutdown: deprecated, replaced by irq_shutdown
+ * @enable: deprecated, replaced by irq_enable
+ * @disable: deprecated, replaced by irq_disable
+ * @ack: deprecated, replaced by irq_ack
+ * @mask: deprecated, replaced by irq_mask
+ * @mask_ack: deprecated, replaced by irq_mask_ack
+ * @unmask: deprecated, replaced by irq_unmask
+ * @eoi: deprecated, replaced by irq_eoi
+ * @end: deprecated, will go away with __do_IRQ()
+ * @set_affinity: deprecated, replaced by irq_set_affinity
+ * @retrigger: deprecated, replaced by irq_retrigger
+ * @set_type: deprecated, replaced by irq_set_type
+ * @set_wake: deprecated, replaced by irq_wake
+ * @bus_lock: deprecated, replaced by irq_bus_lock
+ * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock
  *
- * @bus_lock: function to lock access to slow bus (i2c) chips
- * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
+ * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
+ * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
+ * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
+ * @irq_disable: disable the interrupt
+ * @irq_ack: start of a new interrupt
+ * @irq_mask: mask an interrupt source
+ * @irq_mask_ack: ack and mask an interrupt source
+ * @irq_unmask: unmask an interrupt source
+ * @irq_eoi: end of interrupt
+ * @irq_set_affinity: set the CPU affinity on SMP machines
+ * @irq_retrigger: resend an IRQ to the CPU
+ * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @irq_set_wake: enable/disable power-management wake-on of an IRQ
+ * @irq_bus_lock: function to lock access to slow bus (i2c) chips
+ * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
  *
 * @release: release function solely used by UML
- * @typename: obsoleted by name, kept as migration helper
  */
 struct irq_chip {
 	const char *name;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
 	unsigned int (*startup)(unsigned int irq);
 	void (*shutdown)(unsigned int irq);
 	void (*enable)(unsigned int irq);
@@ -130,154 +175,66 @@ struct irq_chip {
 
 	void (*bus_lock)(unsigned int irq);
 	void (*bus_sync_unlock)(unsigned int irq);
+#endif
+	unsigned int (*irq_startup)(struct irq_data *data);
+	void (*irq_shutdown)(struct irq_data *data);
+	void (*irq_enable)(struct irq_data *data);
+	void (*irq_disable)(struct irq_data *data);
+
+	void (*irq_ack)(struct irq_data *data);
+	void (*irq_mask)(struct irq_data *data);
+	void (*irq_mask_ack)(struct irq_data *data);
+	void (*irq_unmask)(struct irq_data *data);
+	void (*irq_eoi)(struct irq_data *data);
+
+	int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+	int (*irq_retrigger)(struct irq_data *data);
+	int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+	int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+
+	void (*irq_bus_lock)(struct irq_data *data);
+	void (*irq_bus_sync_unlock)(struct irq_data *data);
 
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void (*release)(unsigned int irq, void *dev_id);
 #endif
-	/*
-	 * For compatibility, ->typename is copied into ->name.
-	 * Will disappear.
-	 */
-	const char *typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq: interrupt number for this descriptor
- * @timer_rand_state: pointer to timer rand state struct
- * @kstat_irqs: irq stats per cpu
- * @irq_2_iommu: iommu with this irq
- * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
- * @chip: low level interrupt hardware access
- * @msi_desc: MSI descriptor
- * @handler_data: per-IRQ data for the irq_chip methods
- * @chip_data: platform-specific per-chip private data for the chip
- *	methods, to allow shared chip implementations
- * @action: the irq action chain
- * @status: status information
- * @depth: disable-depth, for nested irq_disable() calls
- * @wake_depth: enable depth, for multiple set_irq_wake() callers
- * @irq_count: stats field to detect stalled irqs
- * @last_unhandled: aging timer for unhandled count
- * @irqs_unhandled: stats field for spurious unhandled interrupts
- * @lock: locking for SMP
- * @affinity: IRQ affinity on SMP
- * @node: node index useful for balancing
- * @pending_mask: pending rebalanced interrupts
- * @threads_active: number of irqaction threads currently running
- * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
- * @dir: /proc/irq/ procfs entry
- * @name: flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-	unsigned int irq;
-	struct timer_rand_state *timer_rand_state;
-	unsigned int *kstat_irqs;
-#ifdef CONFIG_INTR_REMAP
-	struct irq_2_iommu *irq_2_iommu;
-#endif
-	irq_flow_handler_t handle_irq;
-	struct irq_chip *chip;
-	struct msi_desc *msi_desc;
-	void *handler_data;
-	void *chip_data;
-	struct irqaction *action;	/* IRQ action list */
-	unsigned int status;		/* IRQ status */
-
-	unsigned int depth;		/* nested irq disables */
-	unsigned int wake_depth;	/* nested wake enables */
-	unsigned int irq_count;		/* For detecting broken IRQs */
-	unsigned long last_unhandled;	/* Aging timer for unhandled count */
-	unsigned int irqs_unhandled;
-	raw_spinlock_t lock;
-#ifdef CONFIG_SMP
-	cpumask_var_t affinity;
-	const struct cpumask *affinity_hint;
-	unsigned int node;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_var_t pending_mask;
-#endif
-#endif
-	atomic_t threads_active;
-	wait_queue_head_t wait_for_threads;
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry *dir;
-#endif
-	const char *name;
-} ____cacheline_internodealigned_in_smp;
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-	struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+/*
+ * Pick up the arch-dependent methods:
+ */
+#include <asm/hw_irq.h>
 
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
+#ifndef NR_IRQS_LEGACY
+# define NR_IRQS_LEGACY 0
 #endif
 
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	return desc;
-}
+#ifndef ARCH_IRQ_INIT_FLAGS
+# define ARCH_IRQ_INIT_FLAGS 0
 #endif
 
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
-
-/*
- * Pick up the arch-dependent methods:
- */
-#include <asm/hw_irq.h>
+#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
 
+struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
-
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
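Taken together, the two hunks above leave struct irq_chip with two parallel method sets during the transition: the old unsigned-int callbacks, now fenced off behind !CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED, and the new irq_data based ones. A minimal sketch of a chip written purely against the new methods; the foo_* names are invented and the hardware accesses are elided, so this is illustrative rather than code from the patch:

#include <linux/init.h>
#include <linux/irq.h>

static void foo_irq_ack(struct irq_data *data)
{
	/* hardware ack access elided */
}

static void foo_irq_mask(struct irq_data *data)
{
	/* hardware mask access elided */
}

static void foo_irq_unmask(struct irq_data *data)
{
	/* hardware unmask access elided */
}

static struct irq_chip foo_chip = {
	.name		= "FOO",
	.irq_ack	= foo_irq_ack,
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static void __init foo_init_irqs(unsigned int first, unsigned int nr)
{
	unsigned int irq;

	for (irq = first; irq < first + nr; irq++)
		set_irq_chip_and_handler(irq, &foo_chip, handle_level_irq);
}

Because the deprecated callbacks are still compiled in unless an architecture selects the new config symbol, chips can be converted one at a time without a flag-day change.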
@@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-	generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	irqreturn_t action_ret);
 
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
@@ -351,16 +276,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-	irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq,
 
 extern void set_irq_nested_thread(unsigned int irq, int nest);
 
-extern void set_irq_noprobe(unsigned int irq);
-extern void set_irq_probe(unsigned int irq);
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
+
+static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
+{
+	irq_modify_status(irq, 0, set);
+}
+
+static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
+{
+	irq_modify_status(irq, clr, 0);
+}
+
+static inline void set_irq_noprobe(unsigned int irq)
+{
+	irq_modify_status(irq, 0, IRQ_NOPROBE);
+}
+
+static inline void set_irq_probe(unsigned int irq)
+{
+	irq_modify_status(irq, IRQ_NOPROBE, 0);
+}
 
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+/*
+ * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
+ * irq_free_desc instead.
+ */
+extern void dynamic_irq_cleanup(unsigned int irq);
+static inline void dynamic_irq_init(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
+	dynamic_irq_cleanup(irq);
 }
 
-/* Dynamic irq helper functions */
-extern void dynamic_irq_init(unsigned int irq);
-void dynamic_irq_init_keep_chip_data(unsigned int irq);
-extern void dynamic_irq_cleanup(unsigned int irq);
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
-
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
 extern int set_irq_data(unsigned int irq, void *data);
 extern int set_irq_chip_data(unsigned int irq, void *data);
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
-#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
-#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
-#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
-
-#define get_irq_desc_chip(desc) ((desc)->chip)
-#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
-#define get_irq_desc_data(desc) ((desc)->handler_data)
-#define get_irq_desc_msi(desc) ((desc)->msi_desc)
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
-#endif /* !CONFIG_S390 */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc: pointer to irq_desc struct
- * @node: node which will be handling the cpumasks
- * @boot: true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-	bool boot)
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
 {
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
-		return false;
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
+static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
+{
+	return d->chip;
 }
 
-static inline void init_desc_masks(struct irq_desc *desc)
+static inline void *get_irq_chip_data(unsigned int irq)
 {
-	cpumask_setall(desc->affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip_data : NULL;
 }
 
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc: pointer to old irq_desc struct
- * @new_desc: pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
+static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
+{
+	return d->chip_data;
+}
 
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-	struct irq_desc *new_desc)
+static inline void *get_irq_data(unsigned int irq)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->affinity, old_desc->affinity);
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->handler_data : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+	return d->handler_data;
 }
 
-static inline void free_desc_masks(struct irq_desc *old_desc,
-	struct irq_desc *new_desc)
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
 {
-	free_cpumask_var(old_desc->affinity);
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->msi_desc : NULL;
+}
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+	return d->msi_desc;
 }
 
-#else /* !CONFIG_SMP */
+int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+int irq_reserve_irqs(unsigned int from, unsigned int cnt);
 
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-	bool boot)
+static inline int irq_alloc_desc(int node)
 {
-	return true;
+	return irq_alloc_descs(-1, 0, 1, node);
 }
 
-static inline void init_desc_masks(struct irq_desc *desc)
+static inline int irq_alloc_desc_at(unsigned int at, int node)
 {
+	return irq_alloc_descs(at, at, 1, node);
 }
 
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-	struct irq_desc *new_desc)
+static inline int irq_alloc_desc_from(unsigned int from, int node)
 {
+	return irq_alloc_descs(-1, from, 1, node);
 }
 
-static inline void free_desc_masks(struct irq_desc *old_desc,
-	struct irq_desc *new_desc)
+static inline void irq_free_desc(unsigned int irq)
 {
+	irq_free_descs(irq, 1);
 }
-#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
+#endif /* !CONFIG_S390 */
 
 #endif /* _LINUX_IRQ_H */
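Besides replacing the desc-based accessor macros with irq_data aware inlines, the tail of that hunk introduces the irq_alloc_descs()/irq_free_descs() allocator that obsoletes the dynamic_irq_* helpers. A usage sketch for a hypothetical expander driver (all foo_* names invented, chip callbacks elided): it grabs eight consecutive descriptors anywhere in the irq space, wires them up, and releases them on removal.

#include <linux/irq.h>

static struct irq_chip foo_chip = {
	.name = "FOO",		/* irq_* callbacks elided for brevity */
};

static int foo_probe(int node)
{
	int base, i;

	/* irq = -1 means "no preferred number"; irq_alloc_desc_at() is the
	 * variant for claiming a fixed one. */
	base = irq_alloc_descs(-1, 0, 8, node);
	if (base < 0)
		return base;

	for (i = 0; i < 8; i++)
		set_irq_chip_and_handler(base + i, &foo_chip, handle_level_irq);

	return base;
}

static void foo_remove(int base)
{
	irq_free_descs(base, 8);
}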
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
new file mode 100644
index 000000000000..979c68cc7458
--- /dev/null
+++ b/include/linux/irqdesc.h
@@ -0,0 +1,159 @@
+#ifndef _LINUX_IRQDESC_H
+#define _LINUX_IRQDESC_H
+
+/*
+ * Core internal functions to deal with irq descriptors
+ *
+ * This include will move to kernel/irq once we cleaned up the tree.
+ * For now it's included from <linux/irq.h>
+ */
+
+struct proc_dir_entry;
+struct timer_rand_state;
+/**
+ * struct irq_desc - interrupt descriptor
+ * @irq_data: per irq and chip data passed down to chip functions
+ * @timer_rand_state: pointer to timer rand state struct
+ * @kstat_irqs: irq stats per cpu
+ * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
+ * @action: the irq action chain
+ * @status: status information
+ * @depth: disable-depth, for nested irq_disable() calls
+ * @wake_depth: enable depth, for multiple set_irq_wake() callers
+ * @irq_count: stats field to detect stalled irqs
+ * @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
+ * @lock: locking for SMP
+ * @pending_mask: pending rebalanced interrupts
+ * @threads_active: number of irqaction threads currently running
+ * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @dir: /proc/irq/ procfs entry
+ * @name: flow handler name for /proc/interrupts output
+ */
+struct irq_desc {
+
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+	struct irq_data irq_data;
+#else
+	/*
+	 * This union will go away, once we fixed the direct access to
+	 * irq_desc all over the place. The direct fields are a 1:1
+	 * overlay of irq_data.
+	 */
+	union {
+		struct irq_data irq_data;
+		struct {
+			unsigned int irq;
+			unsigned int node;
+			struct irq_chip *chip;
+			void *handler_data;
+			void *chip_data;
+			struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
+			cpumask_var_t affinity;
+#endif
+		};
+	};
+#endif
+
+	struct timer_rand_state *timer_rand_state;
+	unsigned int *kstat_irqs;
+	irq_flow_handler_t handle_irq;
+	struct irqaction *action;	/* IRQ action list */
+	unsigned int status;		/* IRQ status */
+
+	unsigned int depth;		/* nested irq disables */
+	unsigned int wake_depth;	/* nested wake enables */
+	unsigned int irq_count;		/* For detecting broken IRQs */
+	unsigned long last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int irqs_unhandled;
+	raw_spinlock_t lock;
+#ifdef CONFIG_SMP
+	const struct cpumask *affinity_hint;
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_var_t pending_mask;
+#endif
+#endif
+	atomic_t threads_active;
+	wait_queue_head_t wait_for_threads;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *dir;
+#endif
+	const char *name;
+} ____cacheline_internodealigned_in_smp;
+
+#ifndef CONFIG_SPARSE_IRQ
+extern struct irq_desc irq_desc[NR_IRQS];
+#endif
+
+/* Will be removed once the last users in power and sh are gone */
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+	return desc;
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
+#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
+#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
+#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
+
+/*
+ * Monolithic do_IRQ implementation.
+ */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+extern unsigned int __do_IRQ(unsigned int irq);
+#endif
+
+/*
+ * Architectures call this to let the generic IRQ layer
+ * handle an interrupt. If the descriptor is attached to an
+ * irqchip-style controller then we call the ->handle_irq() handler,
+ * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ */
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+	desc->handle_irq(irq, desc);
+#else
+	if (likely(desc->handle_irq))
+		desc->handle_irq(irq, desc);
+	else
+		__do_IRQ(irq);
+#endif
+}
+
+static inline void generic_handle_irq(unsigned int irq)
+{
+	generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
+/* Test to see if a driver has successfully requested an irq */
+static inline int irq_has_action(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->action != NULL;
+}
+
+static inline int irq_balancing_disabled(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status & IRQ_NO_BALANCING_MASK;
+}
+
+/* caller has locked the irq_desc and both params are valid */
+static inline void __set_irq_handler_unlocked(int irq,
+	irq_flow_handler_t handler)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->handle_irq = handler;
+}
+#endif
+
+#endif
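The new header keeps the dispatch entry points (generic_handle_irq() and friends) that architectures call from their low-level interrupt code. As a reminder of how the pieces fit together, a sketch of such an entry handler; asm_do_foo_IRQ() and the surrounding conventions are invented, loosely modelled on how ARM-style ports do this, not code from this patch:

#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <asm/irq_regs.h>

asmlinkage void asm_do_foo_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	/* Ends up in desc->handle_irq(), or falls back to __do_IRQ() on
	 * architectures that have not set GENERIC_HARDIRQS_NO__DO_IRQ. */
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(old_regs);
}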
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 7bf89bc8cbca..05aa8c23483f 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -25,6 +25,7 @@
 
 extern int nr_irqs;
 extern struct irq_desc *irq_to_desc(unsigned int irq);
+unsigned int irq_get_next_irq(unsigned int offset);
 
 # define for_each_irq_desc(irq, desc) \
 	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 #define irq_node(irq) 0
 #endif
 
+# define for_each_active_irq(irq) \
+	for (irq = irq_get_next_irq(0); irq < nr_irqs; \
+	     irq = irq_get_next_irq(irq + 1))
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #define for_each_irq_nr(irq) \
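for_each_active_irq() walks only the interrupt numbers that actually have a descriptor allocated, as opposed to for_each_irq_nr(), which scans the whole 0..nr_irqs range. A small sketch under the assumption of CONFIG_GENERIC_HARDIRQS; foo_dump_active_irqs() is an invented diagnostic, not part of the patch:

#include <linux/irq.h>
#include <linux/kernel.h>

static void foo_dump_active_irqs(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);

		pr_info("irq %u: flow handler %s\n",
			irq, desc->name ? desc->name : "-");
	}
}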
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2186a64ee4b5..71c09b26c759 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -435,14 +435,6 @@ do { \
 
 #endif /* CONFIG_LOCKDEP */
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-extern void early_init_irq_lock_class(void);
-#else
-static inline void early_init_irq_lock_class(void)
-{
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void early_boot_irqs_off(void);
 extern void early_boot_irqs_on(void);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 91b05c171854..05acced439a3 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,12 +10,13 @@ struct msi_msg {
 };
 
 /* Helper functions */
-struct irq_desc;
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
-extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+struct irq_data;
+struct msi_desc;
+extern void mask_msi_irq(struct irq_data *data);
+extern void unmask_msi_irq(struct irq_data *data);
+extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
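The double-underscore variants take a struct msi_desc directly, so chip-level code that already holds an irq_data no longer has to go back through the irq number or an irq_desc. A sketch of an affinity-retarget style helper; foo_msi_retarget() and the destination-field arithmetic are hypothetical (roughly the shape of what x86 does), not lifted from this patch:

#include <linux/irq.h>
#include <linux/msi.h>

static void foo_msi_retarget(struct irq_data *data, unsigned int dest_id)
{
	struct msi_desc *entry = irq_data_get_msi(data);
	struct msi_msg msg;

	__get_cached_msi_msg(entry, &msg);

	/* Assumed layout: destination id lives in bits 19:12 of address_lo. */
	msg.address_lo &= ~0x000ff000;
	msg.address_lo |= (dest_id & 0xff) << 12;

	__write_msi_msg(entry, &msg);
}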