author    Thomas Gleixner <tglx@linutronix.de>      2008-11-07 07:18:30 -0500
committer Ingo Molnar <mingo@elte.hu>               2008-11-09 16:23:49 -0500
commit    f6d87f4bd259cf33e092cd1a8fde05f291c47af1  (patch)
tree      abaa66af3c80fb18a20004b8d97261c680551792
parent    8b805ef617cf0e02f6d18b891f8deb6246421b01  (diff)
genirq: keep affinities set from userspace across free/request_irq()
Impact: preserve user-modified affinities on interrupts
Kumar Gala noticed that commit
18404756765c713a0be4eb1082920c04822ce588 ("genirq: Expose default irq
affinity mask (take 3)")
overrides an already configured affinity setting across a
free_irq()/request_irq() cycle. This happens e.g. on ifdown/ifup of a
network device.
Change the logic to mark such affinities as user-set and keep them
intact. This also fixes the unlocked access to irq_desc in
irq_select_affinity() when it is called from irq_affinity_proc_write().
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
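For orientation before the patch body: the core of the change is a new
IRQ_AFFINITY_SET status flag. The affinity autoselector now falls back to the
default mask only when none of the user-chosen target CPUs is still online;
otherwise the user-set mask is kept. The standalone sketch below models that
decision with plain bitmasks instead of cpumask_t; struct fake_desc,
select_affinity() and the example masks are illustrative assumptions, not
kernel API. The authoritative code is the do_irq_select_affinity() hunk in
kernel/irq/manage.c below.

```c
/*
 * Simplified userspace model of the new affinity autoselection logic.
 * Names and types are illustrative only; see kernel/irq/manage.c for
 * the real implementation.
 */
#include <stdio.h>

#define IRQ_AFFINITY_SET 0x02000000	/* affinity was set from userspace */

struct fake_desc {
	unsigned int status;	/* IRQ status flags */
	unsigned long affinity;	/* current affinity bitmask */
};

static unsigned long select_affinity(struct fake_desc *desc,
				     unsigned long online,
				     unsigned long default_affinity)
{
	unsigned long mask = online & default_affinity;

	/* Preserve a userspace setting while any of its targets is online. */
	if (desc->status & IRQ_AFFINITY_SET) {
		if (desc->affinity & online)
			mask = desc->affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET; /* stale: fall back */
	}

	desc->affinity = mask;
	return mask;
}

int main(void)
{
	struct fake_desc desc = { .status = IRQ_AFFINITY_SET, .affinity = 0x4 };

	/* CPU 2 is online: the user-chosen mask 0x4 survives a re-request. */
	printf("kept:     %#lx\n", select_affinity(&desc, 0xf, 0x3));

	/* CPU 2 went offline: fall back to the default mask. */
	desc.status |= IRQ_AFFINITY_SET;
	desc.affinity = 0x4;
	printf("fallback: %#lx\n", select_affinity(&desc, 0x3, 0x3));
	return 0;
}
```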
-rw-r--r--  include/linux/irq.h     |  8
-rw-r--r--  kernel/irq/internals.h  |  2
-rw-r--r--  kernel/irq/manage.c     | 58
-rw-r--r--  kernel/irq/migration.c  | 11
-rw-r--r--  kernel/irq/proc.c       |  2

5 files changed, 53 insertions, 28 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d058c57be02d..36b186eb318b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -63,7 +63,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
 #define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
 #define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
 #define IRQ_MOVE_PCNTXT	0x01000000	/* IRQ migration from process context */
+#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -210,7 +211,6 @@ extern int setup_irq(unsigned int irq, struct irqaction *new);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 
-void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
@@ -228,10 +228,6 @@ static inline void move_masked_irq(int irq)
 {
 }
 
-static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-}
-
 #endif /* CONFIG_GENERIC_PENDING_IRQ */
 
 #else /* CONFIG_SMP */
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c9767e641980..64c1c7253dae 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -25,6 +25,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern int irq_select_affinity_usr(unsigned int irq);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c498a1b8c621..634a2a955104 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -82,24 +82,27 @@ int irq_can_set_affinity(unsigned int irq)
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
+	spin_lock_irqsave(&desc->lock, flags);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
 		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -107,24 +110,59 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
-	struct irq_desc *desc;
 
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	desc = irq_to_desc(irq);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & IRQ_AFFINITY_SET) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
 	desc->affinity = mask;
 	desc->chip->set_affinity(irq, mask);
 
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_select_irq_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -446,7 +484,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 		desc->depth = 1;
 
 		/* Set default affinity mask once everything is setup */
-		irq_select_affinity(irq);
+		do_irq_select_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 90b920d3f52b..9db681d95814 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,17 +1,6 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_MOVE_PENDING;
-	desc->pending_mask = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4d161c70ba55..d257e7d6a8a4 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : count;
+		return irq_select_affinity_usr(irq) ? -EINVAL : count;
 
 	irq_set_affinity(irq, new_value);
 
