path: root/kernel/irq/irqdesc.c
author    Thomas Gleixner <tglx@linutronix.de>    2014-12-11 17:01:41 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2014-12-13 07:33:07 -0500
commit    c291ee622165cb2c8d4e7af63fffd499354a23be (patch)
tree      40737c5d5d419d35ed386d46b77f979dd5900312 /kernel/irq/irqdesc.c
parent    3a5dc1fafb016560315fe45bb4ef8bde259dd1bc (diff)
genirq: Prevent proc race against freeing of irq descriptors
Since the rework of the sparse interrupt code to actually free the
unused interrupt descriptors there exists a race between the /proc
interfaces to the irq subsystem and the code which frees the interrupt
descriptor.

CPU0                            CPU1
                                show_interrupts()
                                  desc = irq_to_desc(X);
free_desc(desc)
  remove_from_radix_tree();
  kfree(desc);
                                  raw_spinlock_irq(&desc->lock);

/proc/interrupts is the only interface which can actively corrupt
kernel memory via the lock access. /proc/stat can only read from freed
memory. Extremely hard to trigger, but possible.

The interfaces in /proc/irq/N/ are not affected by this because the
removal of the proc file is serialized in procfs against concurrent
readers/writers. The removal happens before the descriptor is freed.

For architectures which have CONFIG_SPARSE_IRQ=n this is a non-issue
as the descriptor is never freed. It's merely cleared out with the irq
descriptor lock held, so any concurrent proc access will either see
the old correct values or the cleared out ones.

Protect the lookup and access to the irq descriptor in
show_interrupts() with the sparse_irq_lock.

Provide kstat_irqs_usr(), which protects the lookup and access with
sparse_irq_lock, and switch /proc/stat to use it.

Document the existing kstat_irqs interfaces so it's clear that the
caller is responsible for the protection. The users of these
interfaces are either not affected due to SPARSE_IRQ=n or already
protected against removal.

Fixes: 1f5a5b87f78f "genirq: Implement a sane sparse_irq allocator"
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
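To make the locking pattern concrete, here is a minimal sketch (not part of this
patch) of how a /proc reader such as show_interrupts() is expected to use the new
helpers; the function name proc_irq_reader() and its body are illustrative only:

/*
 * Illustrative sketch only -- not from this patch. It shows the pattern the
 * changelog describes: hold sparse_irq_lock around the descriptor lookup so
 * free_desc() cannot delete and kfree() the descriptor while the reader is
 * about to take desc->lock.
 */
static void proc_irq_reader(unsigned int irq)
{
        struct irq_desc *desc;
        unsigned long flags;

        irq_lock_sparse();              /* blocks free_desc() */
        desc = irq_to_desc(irq);        /* lookup cannot race with kfree() */
        if (desc) {
                raw_spin_lock_irqsave(&desc->lock, flags);
                /* ... read counts / action names for the /proc output ... */
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
        irq_unlock_sparse();
}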
Diffstat (limited to 'kernel/irq/irqdesc.c')
-rw-r--r--  kernel/irq/irqdesc.c  52
1 file changed, 52 insertions, 0 deletions
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a1782f88f0af..99793b9b6d23 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
 static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
+void irq_lock_sparse(void)
+{
+        mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+        mutex_unlock(&sparse_irq_lock);
+}
+
 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 {
         struct irq_desc *desc;
@@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
 
         unregister_irq_proc(irq, desc);
 
+        /*
+         * sparse_irq_lock also protects show_interrupts() and
+         * kstat_irqs_usr(). Once we have deleted the descriptor from
+         * the sparse tree we can free it. Lookups from proc will then
+         * fail to find the descriptor.
+         */
         mutex_lock(&sparse_irq_lock);
         delete_irq_desc(irq);
         mutex_unlock(&sparse_irq_lock);
@@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
         kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 }
 
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq:        The interrupt number
+ * @cpu:        The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
         struct irq_desc *desc = irq_to_desc(irq);
@@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
                 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq:        The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
@@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
                 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
         return sum;
 }
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq:        The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+        int sum;
+
+        irq_lock_sparse();
+        sum = kstat_irqs(irq);
+        irq_unlock_sparse();
+        return sum;
+}
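Not visible in this diffstat-limited view: the changelog also switches /proc/stat
over to the new helper. The caller-side change (in fs/proc/stat.c) amounts to
summing the per-irq totals through kstat_irqs_usr() instead of kstat_irqs(),
roughly as sketched here:

        /* fs/proc/stat.c, show_stat() -- sketch of the caller-side switch */
        for_each_irq_nr(j)
                sum += kstat_irqs_usr(j);       /* was: kstat_irqs(j) */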