diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-02-07 10:46:58 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-02-19 06:58:07 -0500 |
commit | 1fa46f1f070961783661ae640cd2f6b2557f3885 (patch) | |
tree | cef8928b1a89edc251f8b3f9d9a67bfaa10248af /kernel/irq/manage.c | |
parent | a0cd9ca2b907d7ee26575e7b63ac92dad768a75e (diff) |
genirq: Simplify affinity related code
There is a lot of #ifdef CONFIG_GENERIC_PENDING_IRQ along with
duplicated code in the irq core. Move the #ifdeffery into one place
and clean up the code so it's readable. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r-- | kernel/irq/manage.c | 64 |
1 file changed, 41 insertions, 23 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b1b4da9446e6..99f3e9a3780c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -100,47 +100,70 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
104 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) | ||
105 | { | ||
106 | return desc->status & IRQ_MOVE_PCNTXT; | ||
107 | } | ||
108 | static inline bool irq_move_pending(struct irq_desc *desc) | ||
109 | { | ||
110 | return desc->status & IRQ_MOVE_PENDING; | ||
111 | } | ||
112 | static inline void | ||
113 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | ||
114 | { | ||
115 | cpumask_copy(desc->pending_mask, mask); | ||
116 | } | ||
117 | static inline void | ||
118 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | ||
119 | { | ||
120 | cpumask_copy(mask, desc->pending_mask); | ||
121 | } | ||
122 | #else | ||
123 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } | ||
124 | static inline bool irq_move_pending(struct irq_desc *desc) { return false; } | ||
125 | static inline void | ||
126 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | ||
127 | static inline void | ||
128 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | ||
129 | #endif | ||
130 | |||
103 | /** | 131 | /** |
104 | * irq_set_affinity - Set the irq affinity of a given irq | 132 | * irq_set_affinity - Set the irq affinity of a given irq |
105 | * @irq: Interrupt to set affinity | 133 | * @irq: Interrupt to set affinity |
106 | * @cpumask: cpumask | 134 | * @cpumask: cpumask |
107 | * | 135 | * |
108 | */ | 136 | */ |
109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 137 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
110 | { | 138 | { |
111 | struct irq_desc *desc = irq_to_desc(irq); | 139 | struct irq_desc *desc = irq_to_desc(irq); |
112 | struct irq_chip *chip = desc->irq_data.chip; | 140 | struct irq_chip *chip = desc->irq_data.chip; |
113 | unsigned long flags; | 141 | unsigned long flags; |
142 | int ret = 0; | ||
114 | 143 | ||
115 | if (!chip->irq_set_affinity) | 144 | if (!chip->irq_set_affinity) |
116 | return -EINVAL; | 145 | return -EINVAL; |
117 | 146 | ||
118 | raw_spin_lock_irqsave(&desc->lock, flags); | 147 | raw_spin_lock_irqsave(&desc->lock, flags); |
119 | 148 | ||
120 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 149 | if (irq_can_move_pcntxt(desc)) { |
121 | if (desc->status & IRQ_MOVE_PCNTXT) { | 150 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
122 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | 151 | if (!ret) { |
123 | cpumask_copy(desc->irq_data.affinity, cpumask); | 152 | cpumask_copy(desc->irq_data.affinity, mask); |
124 | irq_set_thread_affinity(desc); | 153 | irq_set_thread_affinity(desc); |
125 | } | 154 | } |
126 | } | 155 | } else { |
127 | else { | ||
128 | desc->status |= IRQ_MOVE_PENDING; | 156 | desc->status |= IRQ_MOVE_PENDING; |
129 | cpumask_copy(desc->pending_mask, cpumask); | 157 | irq_copy_pending(desc, mask); |
130 | } | 158 | } |
131 | #else | 159 | |
132 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | ||
133 | cpumask_copy(desc->irq_data.affinity, cpumask); | ||
134 | irq_set_thread_affinity(desc); | ||
135 | } | ||
136 | #endif | ||
137 | if (desc->affinity_notify) { | 160 | if (desc->affinity_notify) { |
138 | kref_get(&desc->affinity_notify->kref); | 161 | kref_get(&desc->affinity_notify->kref); |
139 | schedule_work(&desc->affinity_notify->work); | 162 | schedule_work(&desc->affinity_notify->work); |
140 | } | 163 | } |
141 | desc->status |= IRQ_AFFINITY_SET; | 164 | desc->status |= IRQ_AFFINITY_SET; |
142 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 165 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
143 | return 0; | 166 | return ret; |
144 | } | 167 | } |
145 | 168 | ||
146 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 169 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
@@ -167,18 +190,13 @@ static void irq_affinity_notify(struct work_struct *work) | |||
167 | cpumask_var_t cpumask; | 190 | cpumask_var_t cpumask; |
168 | unsigned long flags; | 191 | unsigned long flags; |
169 | 192 | ||
170 | if (!desc) | 193 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
171 | goto out; | ||
172 | |||
173 | if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) | ||
174 | goto out; | 194 | goto out; |
175 | 195 | ||
176 | raw_spin_lock_irqsave(&desc->lock, flags); | 196 | raw_spin_lock_irqsave(&desc->lock, flags); |
177 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 197 | if (irq_move_pending(desc)) |
178 | if (desc->status & IRQ_MOVE_PENDING) | 198 | irq_get_pending(cpumask, desc); |
179 | cpumask_copy(cpumask, desc->pending_mask); | ||
180 | else | 199 | else |
181 | #endif | ||
182 | cpumask_copy(cpumask, desc->irq_data.affinity); | 200 | cpumask_copy(cpumask, desc->irq_data.affinity); |
183 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 201 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
184 | 202 | ||