Diffstat (limited to 'include/linux/irq.h')
-rw-r--r--	include/linux/irq.h	447
1 file changed, 171 insertions(+), 276 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c03243ad84b4..e9639115dff1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
 #define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */
 
+#define IRQF_MODIFY_MASK	\
+	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
 # define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
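The new IRQF_MODIFY_MASK groups the status bits that callers are allowed to change through irq_modify_status(), which this patch adds further down in this header. The function itself lives in kernel/irq/chip.c and is not part of this hunk; the fragment below is only a sketch of the intended semantics (descriptor lookup and locking omitted), not the actual implementation:

/*
 * Sketch only: illustrates how IRQF_MODIFY_MASK restricts which status
 * bits a caller may clear or set. The real irq_modify_status() also
 * looks up the descriptor and takes its lock.
 */
static unsigned int sketch_modify_status(unsigned int status,
					 unsigned long clr, unsigned long set)
{
	clr &= IRQF_MODIFY_MASK;	/* drop bits callers may not touch */
	set &= IRQF_MODIFY_MASK;

	status &= ~clr;
	status |= set;
	return status;
}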
@@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
 #endif
 
-struct proc_dir_entry;
 struct msi_desc;
 
 /**
+ * struct irq_data - per irq and irq chip data passed down to chip functions
+ * @irq:		interrupt number
+ * @node:		node index useful for balancing
+ * @chip:		low level interrupt hardware access
+ * @handler_data:	per-IRQ data for the irq_chip methods
+ * @chip_data:		platform-specific per-chip private data for the chip
+ *			methods, to allow shared chip implementations
+ * @msi_desc:		MSI descriptor
+ * @affinity:		IRQ affinity on SMP
+ *
+ * The fields here need to overlay the ones in irq_desc until we
+ * cleaned up the direct references and switched everything over to
+ * irq_data.
+ */
+struct irq_data {
+	unsigned int		irq;
+	unsigned int		node;
+	struct irq_chip		*chip;
+	void			*handler_data;
+	void			*chip_data;
+	struct msi_desc		*msi_desc;
+#ifdef CONFIG_SMP
+	cpumask_var_t		affinity;
+#endif
+};
+
+/**
  * struct irq_chip - hardware interrupt chip descriptor
  *
  * @name:		name for /proc/interrupts
- * @startup:		start up the interrupt (defaults to ->enable if NULL)
- * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
- * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
- * @disable:		disable the interrupt
- * @ack:		start of a new interrupt
- * @mask:		mask an interrupt source
- * @mask_ack:		ack and mask an interrupt source
- * @unmask:		unmask an interrupt source
- * @eoi:		end of interrupt - chip level
- * @end:		end of interrupt - flow level
- * @set_affinity:	set the CPU affinity on SMP machines
- * @retrigger:		resend an IRQ to the CPU
- * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
- * @set_wake:		enable/disable power-management wake-on of an IRQ
+ * @startup:		deprecated, replaced by irq_startup
+ * @shutdown:		deprecated, replaced by irq_shutdown
+ * @enable:		deprecated, replaced by irq_enable
+ * @disable:		deprecated, replaced by irq_disable
+ * @ack:		deprecated, replaced by irq_ack
+ * @mask:		deprecated, replaced by irq_mask
+ * @mask_ack:		deprecated, replaced by irq_mask_ack
+ * @unmask:		deprecated, replaced by irq_unmask
+ * @eoi:		deprecated, replaced by irq_eoi
+ * @end:		deprecated, will go away with __do_IRQ()
+ * @set_affinity:	deprecated, replaced by irq_set_affinity
+ * @retrigger:		deprecated, replaced by irq_retrigger
+ * @set_type:		deprecated, replaced by irq_set_type
+ * @set_wake:		deprecated, replaced by irq_wake
+ * @bus_lock:		deprecated, replaced by irq_bus_lock
+ * @bus_sync_unlock:	deprecated, replaced by irq_bus_sync_unlock
  *
- * @bus_lock:		function to lock access to slow bus (i2c) chips
- * @bus_sync_unlock:	function to sync and unlock slow bus (i2c) chips
+ * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
+ * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
+ * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
+ * @irq_disable:	disable the interrupt
+ * @irq_ack:		start of a new interrupt
+ * @irq_mask:		mask an interrupt source
+ * @irq_mask_ack:	ack and mask an interrupt source
+ * @irq_unmask:		unmask an interrupt source
+ * @irq_eoi:		end of interrupt
+ * @irq_set_affinity:	set the CPU affinity on SMP machines
+ * @irq_retrigger:	resend an IRQ to the CPU
+ * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
+ * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
+ * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
  *
  * @release:		release function solely used by UML
- * @typename:		obsoleted by name, kept as migration helper
  */
 struct irq_chip {
 	const char	*name;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
 	unsigned int	(*startup)(unsigned int irq);
 	void		(*shutdown)(unsigned int irq);
 	void		(*enable)(unsigned int irq);
@@ -130,154 +175,66 @@ struct irq_chip {
 
 	void		(*bus_lock)(unsigned int irq);
 	void		(*bus_sync_unlock)(unsigned int irq);
+#endif
+	unsigned int	(*irq_startup)(struct irq_data *data);
+	void		(*irq_shutdown)(struct irq_data *data);
+	void		(*irq_enable)(struct irq_data *data);
+	void		(*irq_disable)(struct irq_data *data);
+
+	void		(*irq_ack)(struct irq_data *data);
+	void		(*irq_mask)(struct irq_data *data);
+	void		(*irq_mask_ack)(struct irq_data *data);
+	void		(*irq_unmask)(struct irq_data *data);
+	void		(*irq_eoi)(struct irq_data *data);
+
+	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+	int		(*irq_retrigger)(struct irq_data *data);
+	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);
+
+	void		(*irq_bus_lock)(struct irq_data *data);
+	void		(*irq_bus_sync_unlock)(struct irq_data *data);
 
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void		(*release)(unsigned int irq, void *dev_id);
 #endif
-	/*
-	 * For compatibility, ->typename is copied into ->name.
-	 * Will disappear.
-	 */
-	const char	*typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq:		interrupt number for this descriptor
- * @timer_rand_state:	pointer to timer rand state struct
- * @kstat_irqs:		irq stats per cpu
- * @irq_2_iommu:	iommu with this irq
- * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
- * @chip:		low level interrupt hardware access
- * @msi_desc:		MSI descriptor
- * @handler_data:	per-IRQ data for the irq_chip methods
- * @chip_data:		platform-specific per-chip private data for the chip
- *			methods, to allow shared chip implementations
- * @action:		the irq action chain
- * @status:		status information
- * @depth:		disable-depth, for nested irq_disable() calls
- * @wake_depth:		enable depth, for multiple set_irq_wake() callers
- * @irq_count:		stats field to detect stalled irqs
- * @last_unhandled:	aging timer for unhandled count
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
- * @lock:		locking for SMP
- * @affinity:		IRQ affinity on SMP
- * @node:		node index useful for balancing
- * @pending_mask:	pending rebalanced interrupts
- * @threads_active:	number of irqaction threads currently running
- * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
- * @dir:		/proc/irq/ procfs entry
- * @name:		flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-	unsigned int		irq;
-	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
-#ifdef CONFIG_INTR_REMAP
-	struct irq_2_iommu	*irq_2_iommu;
-#endif
-	irq_flow_handler_t	handle_irq;
-	struct irq_chip		*chip;
-	struct msi_desc		*msi_desc;
-	void			*handler_data;
-	void			*chip_data;
-	struct irqaction	*action;	/* IRQ action list */
-	unsigned int		status;		/* IRQ status */
-
-	unsigned int		depth;		/* nested irq disables */
-	unsigned int		wake_depth;	/* nested wake enables */
-	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
-	unsigned int		irqs_unhandled;
-	raw_spinlock_t		lock;
-#ifdef CONFIG_SMP
-	cpumask_var_t		affinity;
-	const struct cpumask	*affinity_hint;
-	unsigned int		node;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_var_t		pending_mask;
-#endif
-#endif
-	atomic_t		threads_active;
-	wait_queue_head_t	wait_for_threads;
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry	*dir;
-#endif
-	const char		*name;
-} ____cacheline_internodealigned_in_smp;
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-					struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+/*
+ * Pick up the arch-dependent methods:
+ */
+#include <asm/hw_irq.h>
 
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
+#ifndef NR_IRQS_LEGACY
+# define NR_IRQS_LEGACY 0
 #endif
 
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	return desc;
-}
+#ifndef ARCH_IRQ_INIT_FLAGS
+# define ARCH_IRQ_INIT_FLAGS	0
 #endif
 
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
-
-/*
- * Pick up the arch-dependent methods:
- */
-#include <asm/hw_irq.h>
+#define IRQ_DEFAULT_INIT_FLAGS	(IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
 
+struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
-
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
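This is the core of the conversion: struct irq_chip grows a parallel set of callbacks that receive a struct irq_data pointer instead of a bare interrupt number, while the old variants are kept, for the transition only, behind CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED. As a hedged illustration, a chip driver written against the new callbacks would look roughly like the sketch below; the foo_* names, the register layout and the irq_base bookkeeping are invented for the example and are not part of this patch:

/*
 * Hypothetical interrupt controller with a single mask register.
 * chip_data is assumed to have been attached with set_irq_chip_data()
 * at setup time; irq_data_get_irq_chip_data() is the accessor added
 * later in this diff.
 */
struct foo_intc {
	void __iomem	*mask_reg;
	unsigned int	irq_base;
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_intc *intc = irq_data_get_irq_chip_data(d);
	u32 bit = BIT(d->irq - intc->irq_base);

	writel(readl(intc->mask_reg) | bit, intc->mask_reg);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo_intc *intc = irq_data_get_irq_chip_data(d);
	u32 bit = BIT(d->irq - intc->irq_base);

	writel(readl(intc->mask_reg) & ~bit, intc->mask_reg);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};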
@@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-	generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 			   irqreturn_t action_ret);
 
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
@@ -351,16 +276,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-					      irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq,
 
 extern void set_irq_nested_thread(unsigned int irq, int nest);
 
-extern void set_irq_noprobe(unsigned int irq);
-extern void set_irq_probe(unsigned int irq);
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
+
+static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
+{
+	irq_modify_status(irq, 0, set);
+}
+
+static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
+{
+	irq_modify_status(irq, clr, 0);
+}
+
+static inline void set_irq_noprobe(unsigned int irq)
+{
+	irq_modify_status(irq, 0, IRQ_NOPROBE);
+}
+
+static inline void set_irq_probe(unsigned int irq)
+{
+	irq_modify_status(irq, IRQ_NOPROBE, 0);
+}
 
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+/*
+ * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
+ * irq_free_desc instead.
+ */
+extern void dynamic_irq_cleanup(unsigned int irq);
+static inline void dynamic_irq_init(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
+	dynamic_irq_cleanup(irq);
 }
 
-/* Dynamic irq helper functions */
-extern void dynamic_irq_init(unsigned int irq);
-void dynamic_irq_init_keep_chip_data(unsigned int irq);
-extern void dynamic_irq_cleanup(unsigned int irq);
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
-
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
 extern int set_irq_data(unsigned int irq, void *data);
 extern int set_irq_chip_data(unsigned int irq, void *data);
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
-#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
-#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
-#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
-
-#define get_irq_desc_chip(desc)		((desc)->chip)
-#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
-#define get_irq_desc_data(desc)		((desc)->handler_data)
-#define get_irq_desc_msi(desc)		((desc)->msi_desc)
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
-#endif /* !CONFIG_S390 */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:	pointer to irq_desc struct
- * @node:	node which will be handling the cpumasks
- * @boot:	true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-							bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:	pointer to old irq_desc struct
- * @new_desc:	pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->affinity, old_desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-								bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif /* CONFIG_SMP */
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip : NULL;
+}
+
+static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
+{
+	return d->chip;
+}
+
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip_data : NULL;
+}
+
+static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
+{
+	return d->chip_data;
+}
+
+static inline void *get_irq_data(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->handler_data : NULL;
+}
+
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+	return d->handler_data;
+}
+
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->msi_desc : NULL;
+}
+
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+	return d->msi_desc;
+}
+
+int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+int irq_reserve_irqs(unsigned int from, unsigned int cnt);
+
+static inline int irq_alloc_desc(int node)
+{
+	return irq_alloc_descs(-1, 0, 1, node);
+}
+
+static inline int irq_alloc_desc_at(unsigned int at, int node)
+{
+	return irq_alloc_descs(at, at, 1, node);
+}
+
+static inline int irq_alloc_desc_from(unsigned int from, int node)
+{
+	return irq_alloc_descs(-1, from, 1, node);
+}
+
+static inline void irq_free_desc(unsigned int irq)
+{
+	irq_free_descs(irq, 1);
+}
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
+#endif /* !CONFIG_S390 */
 
 #endif /* _LINUX_IRQ_H */
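The accessor helpers and the irq_alloc_descs()/irq_free_descs() family above replace the old irq_to_desc()-poking macros and the obsolete dynamic_irq_* helpers. A rough usage sketch, reusing the hypothetical foo_* chip from the earlier example (the setup/teardown functions and the choice of IRQ_NOPROBE are illustrative, not taken from this patch):

/*
 * Hypothetical setup path: allocate one descriptor on the given node,
 * attach the chip and per-chip data, and adjust status bits through
 * the irq_modify_status() based helpers instead of poking irq_desc.
 */
static int foo_setup_irq(struct foo_intc *intc, int node)
{
	int irq = irq_alloc_desc(node);		/* irq_alloc_descs(-1, 0, 1, node) */

	if (irq < 0)
		return irq;

	set_irq_chip(irq, &foo_irq_chip);
	set_irq_chip_data(irq, intc);

	/* Only bits within IRQF_MODIFY_MASK are actually changed. */
	irq_set_status_flags(irq, IRQ_NOPROBE);

	return irq;
}

static void foo_teardown_irq(unsigned int irq)
{
	irq_clear_status_flags(irq, IRQ_NOPROBE);
	irq_free_desc(irq);			/* irq_free_descs(irq, 1) */
}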
