author	Thomas Gleixner <tglx@linutronix.de>	2012-12-07 09:49:47 -0500
committer	Tony Lindgren <tony@atomide.com>	2013-02-01 17:33:56 -0500
commit	aecb9e1422e904d1950620d90c589a141cb32196 (patch)
tree	3b300a0cde3c8002fdedebfe0bc4f9bbec422068 /arch
parent	88b62b915b0b7e25870eb0604ed9a92ba4bfc9f7 (diff)
ARM: OMAP: make wakeupgen_lock raw
When the RT patch is applied on top of Linux, spinlocks are implemented as RT-mutexes, which means they become preemptible. The current OMAP wakeupgen/GIC code uses a regular spinlock to protect against preemption. As it turns out, that lock needs to be converted into a raw_spinlock so that OMAP's interrupt controller keeps working as expected after the RT patch is applied.

This patch simply reduces the number of changes the RT team needs to carry out of tree. It does not cause any change in behavior.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
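For illustration only (this sketch is not part of the patch, and the lock and function names below are hypothetical), the locking pattern the patch switches to looks like the following. A raw spinlock remains a true spinning lock even under PREEMPT_RT, so it can safely be taken from irq_chip callbacks that run with interrupts disabled and must not sleep.

#include <linux/spinlock.h>
#include <linux/irq.h>

/* Hypothetical example lock, analogous to wakeupgen_lock in the patch. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_irq_mask(struct irq_data *d)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and spins;
	 * unlike spin_lock_irqsave(), it is not converted into a sleeping
	 * rtmutex when the RT patch (PREEMPT_RT) is applied.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... program the wakeup/mask registers here ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}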
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mach-omap2/omap-wakeupgen.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 5d3b4f4f81ae..8633a43acae2 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -46,7 +46,7 @@
 
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
-static DEFINE_SPINLOCK(wakeupgen_lock);
+static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
 static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
@@ -134,9 +134,9 @@ static void wakeupgen_mask(struct irq_data *d)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 
 /*
@@ -146,9 +146,9 @@ static void wakeupgen_unmask(struct irq_data *d)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -189,7 +189,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeupgen_lock, flags);
+	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
 	if (set) {
 		_wakeupgen_save_masks(cpu);
 		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
@@ -197,7 +197,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
 		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
 		_wakeupgen_restore_masks(cpu);
 	}
-	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
 }
 #endif
 