Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--  kernel/irq/manage.c | 82
 1 file changed, 59 insertions(+), 23 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..10ad2f87ed9a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -79,27 +79,30 @@ int irq_can_set_affinity(unsigned int irq)
  *	@cpumask:	cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
-		desc->affinity = cpumask;
+		cpumask_copy(&desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		cpumask_copy(&desc->pending_mask, cpumask);
+	}
 #else
-	desc->affinity = cpumask;
+	cpumask_copy(&desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
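With the new prototype above, callers pass the affinity mask by pointer instead of by value. A minimal caller sketch, assuming the pointer-based cpumask API this series converts to (the helper name pin_irq_to_cpu0 is made up for illustration):

/* Illustrative only: pin an interrupt to CPU 0 via the new prototype. */
static int pin_irq_to_cpu0(unsigned int irq)
{
	/* cpumask_of() yields a const struct cpumask * covering one CPU. */
	return irq_set_affinity(irq, cpumask_of(0));
}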
@@ -107,24 +110,57 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
-	cpumask_t mask;
-	struct irq_desc *desc;
-
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
-	cpus_and(mask, cpu_online_map, irq_default_affinity);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		    < nr_cpu_ids)
+			goto set_affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
 
-	desc = irq_to_desc(irq);
-	desc->affinity = mask;
-	desc->chip->set_affinity(irq, mask);
+	cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+set_affinity:
+	desc->chip->set_affinity(irq, &desc->affinity);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -327,7 +363,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -445,8 +481,12 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -459,10 +499,6 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
