Diffstat (limited to 'kernel/irq/irqdesc.c')
-rw-r--r--  kernel/irq/irqdesc.c | 63
1 file changed, 47 insertions(+), 16 deletions(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 8731e1c5d1e7..a623b44f2d4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -68,9 +68,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
         return 0;
 }
 
-static void desc_smp_init(struct irq_desc *desc, int node)
+static void desc_smp_init(struct irq_desc *desc, int node,
+                          const struct cpumask *affinity)
 {
-        cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
+        if (!affinity)
+                affinity = irq_default_affinity;
+        cpumask_copy(desc->irq_common_data.affinity, affinity);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
         cpumask_clear(desc->pending_mask);
 #endif
@@ -82,11 +86,12 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
-static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline void
+desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
-                              struct module *owner)
+                              const struct cpumask *affinity, struct module *owner)
 {
         int cpu;
 
@@ -107,7 +112,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
         desc->owner = owner;
         for_each_possible_cpu(cpu)
                 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
-        desc_smp_init(desc, node);
+        desc_smp_init(desc, node, affinity);
 }
 
 int nr_irqs = NR_IRQS;
@@ -158,7 +163,9 @@ void irq_unlock_sparse(void)
         mutex_unlock(&sparse_irq_lock);
 }
 
-static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
+                                   const struct cpumask *affinity,
+                                   struct module *owner)
 {
         struct irq_desc *desc;
         gfp_t gfp = GFP_KERNEL;
@@ -178,7 +185,8 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
         init_rcu_head(&desc->rcu);
 
-        desc_set_defaults(irq, desc, node, owner);
+        desc_set_defaults(irq, desc, node, affinity, owner);
+        irqd_set(&desc->irq_data, flags);
 
         return desc;
 
@@ -223,13 +231,32 @@ static void free_desc(unsigned int irq)
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-                       struct module *owner)
+                       const struct cpumask *affinity, struct module *owner)
 {
+        const struct cpumask *mask = NULL;
         struct irq_desc *desc;
-        int i;
+        unsigned int flags;
+        int i, cpu = -1;
+
+        if (affinity && cpumask_empty(affinity))
+                return -EINVAL;
+
+        flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
 
         for (i = 0; i < cnt; i++) {
-                desc = alloc_desc(start + i, node, owner);
+                if (affinity) {
+                        cpu = cpumask_next(cpu, affinity);
+                        if (cpu >= nr_cpu_ids)
+                                cpu = cpumask_first(affinity);
+                        node = cpu_to_node(cpu);
+
+                        /*
+                         * For single allocations we use the caller provided
+                         * mask otherwise we use the mask of the target cpu
+                         */
+                        mask = cnt == 1 ? affinity : cpumask_of(cpu);
+                }
+                desc = alloc_desc(start + i, node, flags, mask, owner);
                 if (!desc)
                         goto err;
                 mutex_lock(&sparse_irq_lock);
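
Note on the hunk above: with a non-NULL affinity mask, the allocation loop
walks the mask with wrap-around, so descriptors are spread round-robin over
the masked CPUs and each descriptor's node tracks its target cpu. A small
sketch of just that selection step, extracted for illustration only
(next_target_cpu() is a hypothetical helper, not part of the patch):

/*
 * For a mask of {1,3,5} and cnt = 5, starting from cpu = -1, the
 * targets come out as 1, 3, 5, 1, 3; node selection then follows
 * cpu_to_node() of each target.
 */
static int next_target_cpu(int cpu, const struct cpumask *affinity)
{
        cpu = cpumask_next(cpu, affinity);      /* next set bit after cpu */
        if (cpu >= nr_cpu_ids)                  /* ran off the end: wrap */
                cpu = cpumask_first(affinity);
        return cpu;
}
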
@@ -277,7 +304,7 @@ int __init early_irq_init(void)
         nr_irqs = initcnt;
 
         for (i = 0; i < initcnt; i++) {
-                desc = alloc_desc(i, node, NULL);
+                desc = alloc_desc(i, node, 0, NULL, NULL);
                 set_bit(i, allocated_irqs);
                 irq_insert_desc(i, desc);
         }
@@ -311,7 +338,7 @@ int __init early_irq_init(void)
                 alloc_masks(&desc[i], GFP_KERNEL, node);
                 raw_spin_lock_init(&desc[i].lock);
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-                desc_set_defaults(i, &desc[i], node, NULL);
+                desc_set_defaults(i, &desc[i], node, NULL, NULL);
         }
         return arch_early_irq_init();
 }
@@ -328,11 +355,12 @@ static void free_desc(unsigned int irq)
         unsigned long flags;
 
         raw_spin_lock_irqsave(&desc->lock, flags);
-        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
+        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
         raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+                              const struct cpumask *affinity,
                               struct module *owner)
 {
         u32 i;
@@ -453,12 +481,15 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  * @cnt:        Number of consecutive irqs to allocate.
  * @node:       Preferred node on which the irq descriptor should be allocated
  * @owner:      Owning module (can be NULL)
+ * @affinity:   Optional pointer to an affinity mask which hints where the
+ *              irq descriptors should be allocated and which default
+ *              affinities to use
  *
  * Returns the first irq number or error code
  */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-                  struct module *owner)
+                  struct module *owner, const struct cpumask *affinity)
 {
         int start, ret;
 
@@ -494,7 +525,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 
         bitmap_set(allocated_irqs, start, cnt);
         mutex_unlock(&sparse_irq_lock);
-        return alloc_descs(start, cnt, node, owner);
+        return alloc_descs(start, cnt, node, affinity, owner);
 
 err:
         mutex_unlock(&sparse_irq_lock);
@@ -512,7 +543,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
  */
 unsigned int irq_alloc_hwirqs(int cnt, int node)
 {
-        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
+        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
 
         if (irq < 0)
                 return 0;
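
For reference, a minimal caller sketch against the new __irq_alloc_descs()
signature; this is a sketch under stated assumptions, not part of the commit.
The helper example_setup_queue_irqs(), its vector count and the choice of
cpu_online_mask are hypothetical.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/numa.h>

/*
 * Hypothetical caller: allocate nvec consecutive irq descriptors whose
 * default affinities are spread over the online CPUs. A non-NULL mask
 * makes alloc_descs() set IRQD_AFFINITY_MANAGED and derive each
 * descriptor's node from its target cpu; an empty mask yields -EINVAL.
 */
static int example_setup_queue_irqs(unsigned int nvec)
{
        cpumask_var_t mask;
        int irq;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(mask, cpu_online_mask);

        /* irq = -1, from = 0: let the core pick the first free range */
        irq = __irq_alloc_descs(-1, 0, nvec, NUMA_NO_NODE, THIS_MODULE, mask);

        free_cpumask_var(mask);
        return irq;     /* first irq number or negative error code */
}

Per the comment in alloc_descs(): for cnt == 1 the caller's whole mask is
kept as the default affinity, while for cnt > 1 each descriptor defaults to
the single cpu selected for it.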