path: root/kernel/irq/manage.c
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	83
1 file changed, 62 insertions(+), 21 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..46953a06f4a8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
 		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
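The hunk above widens the locking in irq_set_affinity(): desc->lock is now taken around both the CONFIG_GENERIC_PENDING_IRQ and the plain paths, set_pending_irq() is open-coded via IRQ_MOVE_PENDING/pending_mask, and IRQ_AFFINITY_SET records that an explicit affinity was programmed. For context, a minimal caller-side sketch, assuming a hypothetical driver that pins its interrupt to CPU 1 (the IRQ number, CPU choice, and function name are illustrative; the old-style cpumask_t API is the one this tree still uses):

	/* Hypothetical example: pin a device interrupt to CPU 1. */
	static int example_pin_irq(unsigned int irq)
	{
		cpumask_t mask = cpumask_of_cpu(1);

		if (!irq_can_set_affinity(irq))
			return -ENOSYS;

		/* With this change the whole update is serialized by desc->lock. */
		return irq_set_affinity(irq, mask);
	}
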
@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
-	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	desc = irq_to_desc(irq);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
 	desc->affinity = mask;
 	desc->chip->set_affinity(irq, mask);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
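This hunk splits the autoselector: do_irq_select_affinity() now expects the caller to hold desc->lock and to pass the descriptor, while the new irq_select_affinity_usr() wraps it with the locking for the /proc/irq path; it also preserves a user-set mask (IRQ_AFFINITY_SET) as long as one target CPU is still online. A sketch of the intended calling convention, assuming a hypothetical proc-side helper (only the locking rule and the two entry points come from the hunk above):

	/* Hypothetical /proc write path: fall back to the autoselector when the
	 * requested mask contains no online CPU, otherwise set it directly. */
	static int example_affinity_write(unsigned int irq, cpumask_t new_mask)
	{
		if (!cpus_intersects(new_mask, cpu_online_map))
			return irq_select_affinity_usr(irq);	/* takes desc->lock itself */

		return irq_set_affinity(irq, new_mask);		/* also locks internally now */
	}
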
@@ -327,21 +365,23 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
 
-	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+	/* caller masked out all except trigger mode flags */
+	ret = chip->set_type(irq, flags);
 
 	if (ret)
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-				(int)(flags & IRQF_TRIGGER_MASK),
-				irq, chip->set_type);
+				(int)flags, irq, chip->set_type);
 	else {
+		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+			flags |= IRQ_LEVEL;
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~IRQ_TYPE_SENSE_MASK;
-		desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+		desc->status |= flags;
 	}
 
 	return ret;
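__irq_set_trigger() now trusts the caller to have masked flags down to IRQF_TRIGGER_MASK, downgrades the missing-set_type message to pr_debug, and records the programmed mode in desc->status, including IRQ_LEVEL for level-type interrupts. A hedged driver-side sketch of what feeds this path: a request_irq() call with a trigger flag reaches it through __setup_irq() (handler, device name, and dev pointer are hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	/* Requesting a level-low interrupt; __irq_set_trigger() then sets
	 * IRQ_LEVEL plus the sense bits in desc->status as shown above. */
	static int example_request(unsigned int irq, void *dev)
	{
		return request_irq(irq, example_handler, IRQF_TRIGGER_LOW,
				   "example-dev", dev);
	}
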
@@ -421,7 +461,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			ret = __irq_set_trigger(desc, irq, new->flags);
+			ret = __irq_set_trigger(desc, irq,
+					new->flags & IRQF_TRIGGER_MASK);
 
 			if (ret) {
 				spin_unlock_irqrestore(&desc->lock, flags);
@@ -445,8 +486,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -459,10 +504,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
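The last three hunks adjust __setup_irq(): the trigger flags are masked before calling __irq_set_trigger(), and the IRQF_NOBALANCING handling is moved ahead of the affinity autoselection so that do_irq_select_affinity() can see IRQ_NO_BALANCING and keep the current mask. A simplified sketch of the resulting order for a freshly set-up (non-shared) interrupt, with the surrounding shared/depth handling omitted:

	desc->depth = 1;				/* undo nested disables */

	if (new->flags & IRQF_NOBALANCING)		/* must be set before the */
		desc->status |= IRQ_NO_BALANCING;	/* autoselector runs ...   */

	do_irq_select_affinity(irq, desc);		/* ... so it can honor it */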