Diffstat (limited to 'include/linux/irq.h')
 -rw-r--r--  include/linux/irq.h | 78
 1 file changed, 38 insertions(+), 40 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b7cbeed972e4..cb2e77a3f7f7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -117,7 +117,7 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);
 
 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq,
+	int		(*set_affinity)(unsigned int irq,
 					const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
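For illustration only (not part of this patch): a minimal sketch of a driver-side ->set_affinity callback under the new int-returning signature, so failures can be reported to the core. The chip name, function name, and the hardware-programming step are hypothetical placeholders.

#include <linux/irq.h>
#include <linux/cpumask.h>

/* hypothetical chip callback: route "irq" to the first CPU in "dest" */
static int example_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	unsigned int cpu = cpumask_first(dest);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;		/* empty mask: report failure */

	/* ... program the routing hardware for "cpu" here (placeholder) ... */

	return 0;			/* success is now reported to the core */
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.set_affinity	= example_set_affinity,
};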
@@ -157,7 +157,7 @@ struct irq_2_iommu;
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
- * @cpu:		cpu index useful for balancing
+ * @node:		node index useful for balancing
  * @pending_mask:	pending rebalanced interrupts
  * @threads_active:	number of irqaction threads currently running
  * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
@@ -187,7 +187,7 @@ struct irq_desc {
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
-	unsigned int		cpu;
+	unsigned int		node;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
 #endif
@@ -201,26 +201,23 @@ struct irq_desc {
 } ____cacheline_internodealigned_in_smp;
 
 extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-					struct irq_desc *desc, int cpu);
+					struct irq_desc *desc, int node);
 extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
 #ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
-#else /* CONFIG_SPARSE_IRQ */
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-#endif /* CONFIG_SPARSE_IRQ */
-
-extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+#endif
 
-static inline struct irq_desc *
-irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-	return irq_to_desc(irq);
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
 #else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
 	return desc;
-#endif
 }
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 
 /*
  * Migration helpers for obsolete names, they will go away:
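A hedged sketch of how a caller might use the node-based helpers declared above. The surrounding function, its name, and its error handling are hypothetical, not from this patch; only irq_to_desc_alloc_node() and move_irq_desc() come from the header.

#include <linux/irq.h>
#include <linux/kernel.h>

/* hypothetical caller: get a descriptor allocated near "node" */
static struct irq_desc *example_get_desc(unsigned int irq, int node)
{
	struct irq_desc *desc;

	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_ERR "irq %u: cannot allocate irq_desc\n", irq);
		return NULL;
	}

	/*
	 * With CONFIG_NUMA_IRQ_DESC this may migrate the descriptor to
	 * "node"; otherwise the inline stub returns desc unchanged.
	 */
	desc = move_irq_desc(desc, node);

	return desc;
}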
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq);
 extern void set_irq_probe(unsigned int irq);
 
 /* Handle dynamic irq creation and destruction */
-extern unsigned int create_irq_nr(unsigned int irq_want);
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
@@ -424,47 +421,44 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #ifdef CONFIG_SMP
 /**
- * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * alloc_desc_masks - allocate cpumasks for irq_desc
  * @desc:	pointer to irq_desc struct
- * @cpu:	cpu which will be handling the cpumasks
+ * @node:	node which will be handling the cpumasks
  * @boot:	true if need bootmem
  *
  * Allocates affinity and pending_mask cpumask if required.
  * Returns true if successful (or not required).
- * Side effect: affinity has all bits set, pending_mask has all bits clear.
  */
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 							bool boot)
 {
-	int node;
-
-	if (boot) {
-		alloc_bootmem_cpumask_var(&desc->affinity);
-		cpumask_setall(desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-		alloc_bootmem_cpumask_var(&desc->pending_mask);
-		cpumask_clear(desc->pending_mask);
-#endif
-		return true;
-	}
+	gfp_t gfp = GFP_ATOMIC;
 
-	node = cpu_to_node(cpu);
+	if (boot)
+		gfp = GFP_NOWAIT;
 
-	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
 		return false;
-	cpumask_setall(desc->affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
 		free_cpumask_var(desc->affinity);
 		return false;
 	}
-	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	return true;
 }
 
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
 /**
  * init_copy_desc_masks - copy cpumasks for irq_desc
  * @old_desc:	pointer to old irq_desc struct
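A hedged sketch of pairing the split helpers above: allocation no longer initializes the mask contents, so a caller would follow a successful alloc_desc_masks() with init_desc_masks(). The wrapper function shown is hypothetical.

#include <linux/irq.h>

/* hypothetical wrapper: allocate, then initialize, a descriptor's masks */
static int example_setup_masks(struct irq_desc *desc, int node, bool boot)
{
	if (!alloc_desc_masks(desc, node, boot))
		return -ENOMEM;	/* only possible with CONFIG_CPUMASK_OFFSTACK */

	/* affinity defaults to all CPUs, pending_mask (if any) starts clear */
	init_desc_masks(desc);

	return 0;
}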
@@ -478,7 +472,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
 static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 					struct irq_desc *new_desc)
 {
-#ifdef CONFIG_CPUMASKS_OFFSTACK
+#ifdef CONFIG_CPUMASK_OFFSTACK
 	cpumask_copy(new_desc->affinity, old_desc->affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -499,12 +493,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc,
 
 #else /* !CONFIG_SMP */
 
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 							bool boot)
 {
 	return true;
 }
 
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
 static inline void init_copy_desc_masks(struct irq_desc *old_desc,
 					struct irq_desc *new_desc)
 {