author	Thomas Gleixner <tglx@linutronix.de>	2010-09-29 12:46:55 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2010-10-12 10:53:44 -0400
commit	b7b29338dc7111ed8bd4d6555d84afae13ebe752 (patch)
tree	4c3159ea8bb0489ba463a061d3e6446dbfb45af2 /kernel/irq/irqdesc.c
parent	b7d0d8258a9f71949b810e0f82a3d75088f4d364 (diff)
genirq: Sanitize dynamic irq handling
Use the cleanup functions of the dynamic allocator. No need to have
separate implementations.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/irq/irqdesc.c')
-rw-r--r--	kernel/irq/irqdesc.c	41
1 file changed, 23 insertions(+), 18 deletions(-)
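In short, the patch removes free_desc()'s private copy of the descriptor reset and routes it through dynamic_irq_cleanup() instead. A minimal before/after sketch of that consolidation, lifted from the hunks below (both versions are shown together for comparison, so it is not meant to compile as one unit):

/* Before this patch: free_desc() carried its own copy of the locked reset. */
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * After this patch: the locked reset lives in dynamic_irq_cleanup(),
 * and free_desc() simply delegates to it.
 */
static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}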
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 6c71f8ea5d7d..c9d5a1c12874 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -53,12 +53,21 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 {
 	desc->irq_data.node = node;
 	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
+static inline int desc_node(struct irq_desc *desc)
+{
+	return desc->irq_data.node;
 }
 
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
 static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline int desc_node(struct irq_desc *desc) { return 0; }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
@@ -71,6 +80,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 	desc->status = IRQ_DEFAULT_INIT_FLAGS;
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
+	desc->irq_count = 0;
+	desc->irqs_unhandled = 0;
 	desc->name = NULL;
 	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
 	desc_smp_init(desc, node);
@@ -286,23 +297,9 @@ struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 	return irq_to_desc(irq);
 }
 
-#ifdef CONFIG_SMP
-static inline int desc_node(struct irq_desc *desc)
-{
-	return desc->irq_data.node;
-}
-#else
-static inline int desc_node(struct irq_desc *desc) { return 0; }
-#endif
-
 static void free_desc(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, desc_node(desc));
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	dynamic_irq_cleanup(irq);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
@@ -409,10 +406,18 @@ unsigned int irq_get_next_irq(unsigned int offset)
 	return find_next_bit(allocated_irqs, nr_irqs, offset);
 }
 
-/* Statistics access */
-void clear_kstat_irqs(struct irq_desc *desc)
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq:	irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
 {
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc_set_defaults(irq, desc, desc_node(desc));
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
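For context on how dynamic_irq_cleanup() is consumed: code that allocates interrupts dynamically resets the descriptor through it when tearing the interrupt down. A hedged sketch of such a caller; my_destroy_irq() is a hypothetical function, and the set_irq_chip() detach step is an assumption based on the chip API of this kernel era, not part of this patch:

/*
 * Illustrative teardown path (hypothetical caller). After detaching
 * the chip, dynamic_irq_cleanup() resets the descriptor under
 * desc->lock: handle_bad_irq as handler, depth 1, cleared counters
 * and default affinity, as set up by desc_set_defaults() above.
 */
static void my_destroy_irq(unsigned int irq)
{
	set_irq_chip(irq, NULL);	/* assumed chip-detach helper */
	dynamic_irq_cleanup(irq);	/* reset descriptor to defaults */
}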