diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-03 09:44:46 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:12:14 -0400 |
| commit | bd31b85960a7fcb2d7ede216460b8da71a88411c (patch) | |
| tree | f2ab1a1105705856c5cdfc71bcf3f7b5f897d30d | |
| parent | a1741e7fcbc19a67520115df480ab17012cc3d0b (diff) | |
locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.
In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/arm/common/gic.c | 26 | ||||
| -rw-r--r-- | arch/arm/include/asm/dma.h | 6 | ||||
| -rw-r--r-- | arch/arm/include/asm/mmu.h | 4 | ||||
| -rw-r--r-- | arch/arm/kernel/dma.c | 2 | ||||
| -rw-r--r-- | arch/arm/kernel/smp.c | 6 | ||||
| -rw-r--r-- | arch/arm/kernel/traps.c | 20 | ||||
| -rw-r--r-- | arch/arm/mach-footbridge/include/mach/hardware.h | 2 | ||||
| -rw-r--r-- | arch/arm/mach-footbridge/netwinder-hw.c | 14 | ||||
| -rw-r--r-- | arch/arm/mach-footbridge/netwinder-leds.c | 10 | ||||
| -rw-r--r-- | arch/arm/mach-integrator/core.c | 6 | ||||
| -rw-r--r-- | arch/arm/mach-integrator/pci_v3.c | 14 | ||||
| -rw-r--r-- | arch/arm/mach-ixp4xx/common-pci.c | 22 | ||||
| -rw-r--r-- | arch/arm/mach-shark/leds.c | 6 | ||||
| -rw-r--r-- | arch/arm/mm/cache-l2x0.c | 46 | ||||
| -rw-r--r-- | arch/arm/mm/context.c | 14 | ||||
| -rw-r--r-- | arch/arm/mm/copypage-v4mc.c | 6 | ||||
| -rw-r--r-- | arch/arm/mm/copypage-v6.c | 10 | ||||
| -rw-r--r-- | arch/arm/mm/copypage-xscale.c | 6 |
18 files changed, 110 insertions(+), 110 deletions(-)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 3227ca952a12..6fba8bd5689e 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #include <asm/mach/irq.h> | 33 | #include <asm/mach/irq.h> |
| 34 | #include <asm/hardware/gic.h> | 34 | #include <asm/hardware/gic.h> |
| 35 | 35 | ||
| 36 | static DEFINE_SPINLOCK(irq_controller_lock); | 36 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
| 37 | 37 | ||
| 38 | /* Address of GIC 0 CPU interface */ | 38 | /* Address of GIC 0 CPU interface */ |
| 39 | void __iomem *gic_cpu_base_addr __read_mostly; | 39 | void __iomem *gic_cpu_base_addr __read_mostly; |
| @@ -82,30 +82,30 @@ static void gic_mask_irq(struct irq_data *d) | |||
| 82 | { | 82 | { |
| 83 | u32 mask = 1 << (d->irq % 32); | 83 | u32 mask = 1 << (d->irq % 32); |
| 84 | 84 | ||
| 85 | spin_lock(&irq_controller_lock); | 85 | raw_spin_lock(&irq_controller_lock); |
| 86 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 86 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
| 87 | if (gic_arch_extn.irq_mask) | 87 | if (gic_arch_extn.irq_mask) |
| 88 | gic_arch_extn.irq_mask(d); | 88 | gic_arch_extn.irq_mask(d); |
| 89 | spin_unlock(&irq_controller_lock); | 89 | raw_spin_unlock(&irq_controller_lock); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static void gic_unmask_irq(struct irq_data *d) | 92 | static void gic_unmask_irq(struct irq_data *d) |
| 93 | { | 93 | { |
| 94 | u32 mask = 1 << (d->irq % 32); | 94 | u32 mask = 1 << (d->irq % 32); |
| 95 | 95 | ||
| 96 | spin_lock(&irq_controller_lock); | 96 | raw_spin_lock(&irq_controller_lock); |
| 97 | if (gic_arch_extn.irq_unmask) | 97 | if (gic_arch_extn.irq_unmask) |
| 98 | gic_arch_extn.irq_unmask(d); | 98 | gic_arch_extn.irq_unmask(d); |
| 99 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 99 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
| 100 | spin_unlock(&irq_controller_lock); | 100 | raw_spin_unlock(&irq_controller_lock); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static void gic_eoi_irq(struct irq_data *d) | 103 | static void gic_eoi_irq(struct irq_data *d) |
| 104 | { | 104 | { |
| 105 | if (gic_arch_extn.irq_eoi) { | 105 | if (gic_arch_extn.irq_eoi) { |
| 106 | spin_lock(&irq_controller_lock); | 106 | raw_spin_lock(&irq_controller_lock); |
| 107 | gic_arch_extn.irq_eoi(d); | 107 | gic_arch_extn.irq_eoi(d); |
| 108 | spin_unlock(&irq_controller_lock); | 108 | raw_spin_unlock(&irq_controller_lock); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | 111 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); |
| @@ -129,7 +129,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
| 129 | if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) | 129 | if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) |
| 130 | return -EINVAL; | 130 | return -EINVAL; |
| 131 | 131 | ||
| 132 | spin_lock(&irq_controller_lock); | 132 | raw_spin_lock(&irq_controller_lock); |
| 133 | 133 | ||
| 134 | if (gic_arch_extn.irq_set_type) | 134 | if (gic_arch_extn.irq_set_type) |
| 135 | gic_arch_extn.irq_set_type(d, type); | 135 | gic_arch_extn.irq_set_type(d, type); |
| @@ -154,7 +154,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
| 154 | if (enabled) | 154 | if (enabled) |
| 155 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); | 155 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); |
| 156 | 156 | ||
| 157 | spin_unlock(&irq_controller_lock); | 157 | raw_spin_unlock(&irq_controller_lock); |
| 158 | 158 | ||
| 159 | return 0; | 159 | return 0; |
| 160 | } | 160 | } |
| @@ -182,10 +182,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
| 182 | mask = 0xff << shift; | 182 | mask = 0xff << shift; |
| 183 | bit = 1 << (cpu + shift); | 183 | bit = 1 << (cpu + shift); |
| 184 | 184 | ||
| 185 | spin_lock(&irq_controller_lock); | 185 | raw_spin_lock(&irq_controller_lock); |
| 186 | val = readl_relaxed(reg) & ~mask; | 186 | val = readl_relaxed(reg) & ~mask; |
| 187 | writel_relaxed(val | bit, reg); | 187 | writel_relaxed(val | bit, reg); |
| 188 | spin_unlock(&irq_controller_lock); | 188 | raw_spin_unlock(&irq_controller_lock); |
| 189 | 189 | ||
| 190 | return IRQ_SET_MASK_OK; | 190 | return IRQ_SET_MASK_OK; |
| 191 | } | 191 | } |
| @@ -215,9 +215,9 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |||
| 215 | 215 | ||
| 216 | chained_irq_enter(chip, desc); | 216 | chained_irq_enter(chip, desc); |
| 217 | 217 | ||
| 218 | spin_lock(&irq_controller_lock); | 218 | raw_spin_lock(&irq_controller_lock); |
| 219 | status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK); | 219 | status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK); |
| 220 | spin_unlock(&irq_controller_lock); | 220 | raw_spin_unlock(&irq_controller_lock); |
| 221 | 221 | ||
| 222 | gic_irq = (status & 0x3ff); | 222 | gic_irq = (status & 0x3ff); |
| 223 | if (gic_irq == 1023) | 223 | if (gic_irq == 1023) |
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 628670e9d7c9..69a5b0b6455c 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h | |||
| @@ -34,18 +34,18 @@ | |||
| 34 | #define DMA_MODE_CASCADE 0xc0 | 34 | #define DMA_MODE_CASCADE 0xc0 |
| 35 | #define DMA_AUTOINIT 0x10 | 35 | #define DMA_AUTOINIT 0x10 |
| 36 | 36 | ||
| 37 | extern spinlock_t dma_spin_lock; | 37 | extern raw_spinlock_t dma_spin_lock; |
| 38 | 38 | ||
| 39 | static inline unsigned long claim_dma_lock(void) | 39 | static inline unsigned long claim_dma_lock(void) |
| 40 | { | 40 | { |
| 41 | unsigned long flags; | 41 | unsigned long flags; |
| 42 | spin_lock_irqsave(&dma_spin_lock, flags); | 42 | raw_spin_lock_irqsave(&dma_spin_lock, flags); |
| 43 | return flags; | 43 | return flags; |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static inline void release_dma_lock(unsigned long flags) | 46 | static inline void release_dma_lock(unsigned long flags) |
| 47 | { | 47 | { |
| 48 | spin_unlock_irqrestore(&dma_spin_lock, flags); | 48 | raw_spin_unlock_irqrestore(&dma_spin_lock, flags); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | /* Clear the 'DMA Pointer Flip Flop'. | 51 | /* Clear the 'DMA Pointer Flip Flop'. |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b4ffe9d5b526..14965658a923 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | typedef struct { | 6 | typedef struct { |
| 7 | #ifdef CONFIG_CPU_HAS_ASID | 7 | #ifdef CONFIG_CPU_HAS_ASID |
| 8 | unsigned int id; | 8 | unsigned int id; |
| 9 | spinlock_t id_lock; | 9 | raw_spinlock_t id_lock; |
| 10 | #endif | 10 | #endif |
| 11 | unsigned int kvm_seq; | 11 | unsigned int kvm_seq; |
| 12 | } mm_context_t; | 12 | } mm_context_t; |
| @@ -16,7 +16,7 @@ typedef struct { | |||
| 16 | 16 | ||
| 17 | /* init_mm.context.id_lock should be initialized. */ | 17 | /* init_mm.context.id_lock should be initialized. */ |
| 18 | #define INIT_MM_CONTEXT(name) \ | 18 | #define INIT_MM_CONTEXT(name) \ |
| 19 | .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock), | 19 | .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), |
| 20 | #else | 20 | #else |
| 21 | #define ASID(mm) (0) | 21 | #define ASID(mm) (0) |
| 22 | #endif | 22 | #endif |
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 2c4a185f92cd..7b829d9663b1 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | #include <asm/mach/dma.h> | 24 | #include <asm/mach/dma.h> |
| 25 | 25 | ||
| 26 | DEFINE_SPINLOCK(dma_spin_lock); | 26 | DEFINE_RAW_SPINLOCK(dma_spin_lock); |
| 27 | EXPORT_SYMBOL(dma_spin_lock); | 27 | EXPORT_SYMBOL(dma_spin_lock); |
| 28 | 28 | ||
| 29 | static dma_t *dma_chan[MAX_DMA_CHANNELS]; | 29 | static dma_t *dma_chan[MAX_DMA_CHANNELS]; |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index d88ff0230e82..4e76e0cf09fb 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
| @@ -538,7 +538,7 @@ static void percpu_timer_stop(void) | |||
| 538 | } | 538 | } |
| 539 | #endif | 539 | #endif |
| 540 | 540 | ||
| 541 | static DEFINE_SPINLOCK(stop_lock); | 541 | static DEFINE_RAW_SPINLOCK(stop_lock); |
| 542 | 542 | ||
| 543 | /* | 543 | /* |
| 544 | * ipi_cpu_stop - handle IPI from smp_send_stop() | 544 | * ipi_cpu_stop - handle IPI from smp_send_stop() |
| @@ -547,10 +547,10 @@ static void ipi_cpu_stop(unsigned int cpu) | |||
| 547 | { | 547 | { |
| 548 | if (system_state == SYSTEM_BOOTING || | 548 | if (system_state == SYSTEM_BOOTING || |
| 549 | system_state == SYSTEM_RUNNING) { | 549 | system_state == SYSTEM_RUNNING) { |
| 550 | spin_lock(&stop_lock); | 550 | raw_spin_lock(&stop_lock); |
| 551 | printk(KERN_CRIT "CPU%u: stopping\n", cpu); | 551 | printk(KERN_CRIT "CPU%u: stopping\n", cpu); |
| 552 | dump_stack(); | 552 | dump_stack(); |
| 553 | spin_unlock(&stop_lock); | 553 | raw_spin_unlock(&stop_lock); |
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | set_cpu_online(cpu, false); | 556 | set_cpu_online(cpu, false); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index bc9f9da782cb..81cc05a0274c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
| @@ -255,7 +255,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
| 255 | return ret; | 255 | return ret; |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | static DEFINE_SPINLOCK(die_lock); | 258 | static DEFINE_RAW_SPINLOCK(die_lock); |
| 259 | 259 | ||
| 260 | /* | 260 | /* |
| 261 | * This function is protected against re-entrancy. | 261 | * This function is protected against re-entrancy. |
| @@ -267,7 +267,7 @@ void die(const char *str, struct pt_regs *regs, int err) | |||
| 267 | 267 | ||
| 268 | oops_enter(); | 268 | oops_enter(); |
| 269 | 269 | ||
| 270 | spin_lock_irq(&die_lock); | 270 | raw_spin_lock_irq(&die_lock); |
| 271 | console_verbose(); | 271 | console_verbose(); |
| 272 | bust_spinlocks(1); | 272 | bust_spinlocks(1); |
| 273 | ret = __die(str, err, thread, regs); | 273 | ret = __die(str, err, thread, regs); |
| @@ -277,7 +277,7 @@ void die(const char *str, struct pt_regs *regs, int err) | |||
| 277 | 277 | ||
| 278 | bust_spinlocks(0); | 278 | bust_spinlocks(0); |
| 279 | add_taint(TAINT_DIE); | 279 | add_taint(TAINT_DIE); |
| 280 | spin_unlock_irq(&die_lock); | 280 | raw_spin_unlock_irq(&die_lock); |
| 281 | oops_exit(); | 281 | oops_exit(); |
| 282 | 282 | ||
| 283 | if (in_interrupt()) | 283 | if (in_interrupt()) |
| @@ -302,24 +302,24 @@ void arm_notify_die(const char *str, struct pt_regs *regs, | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | static LIST_HEAD(undef_hook); | 304 | static LIST_HEAD(undef_hook); |
| 305 | static DEFINE_SPINLOCK(undef_lock); | 305 | static DEFINE_RAW_SPINLOCK(undef_lock); |
| 306 | 306 | ||
| 307 | void register_undef_hook(struct undef_hook *hook) | 307 | void register_undef_hook(struct undef_hook *hook) |
| 308 | { | 308 | { |
| 309 | unsigned long flags; | 309 | unsigned long flags; |
| 310 | 310 | ||
| 311 | spin_lock_irqsave(&undef_lock, flags); | 311 | raw_spin_lock_irqsave(&undef_lock, flags); |
| 312 | list_add(&hook->node, &undef_hook); | 312 | list_add(&hook->node, &undef_hook); |
| 313 | spin_unlock_irqrestore(&undef_lock, flags); | 313 | raw_spin_unlock_irqrestore(&undef_lock, flags); |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | void unregister_undef_hook(struct undef_hook *hook) | 316 | void unregister_undef_hook(struct undef_hook *hook) |
| 317 | { | 317 | { |
| 318 | unsigned long flags; | 318 | unsigned long flags; |
| 319 | 319 | ||
| 320 | spin_lock_irqsave(&undef_lock, flags); | 320 | raw_spin_lock_irqsave(&undef_lock, flags); |
| 321 | list_del(&hook->node); | 321 | list_del(&hook->node); |
| 322 | spin_unlock_irqrestore(&undef_lock, flags); | 322 | raw_spin_unlock_irqrestore(&undef_lock, flags); |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | static int call_undef_hook(struct pt_regs *regs, unsigned int instr) | 325 | static int call_undef_hook(struct pt_regs *regs, unsigned int instr) |
| @@ -328,12 +328,12 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) | |||
| 328 | unsigned long flags; | 328 | unsigned long flags; |
| 329 | int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL; | 329 | int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL; |
| 330 | 330 | ||
| 331 | spin_lock_irqsave(&undef_lock, flags); | 331 | raw_spin_lock_irqsave(&undef_lock, flags); |
| 332 | list_for_each_entry(hook, &undef_hook, node) | 332 | list_for_each_entry(hook, &undef_hook, node) |
| 333 | if ((instr & hook->instr_mask) == hook->instr_val && | 333 | if ((instr & hook->instr_mask) == hook->instr_val && |
| 334 | (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) | 334 | (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) |
| 335 | fn = hook->fn; | 335 | fn = hook->fn; |
| 336 | spin_unlock_irqrestore(&undef_lock, flags); | 336 | raw_spin_unlock_irqrestore(&undef_lock, flags); |
| 337 | 337 | ||
| 338 | return fn ? fn(regs, instr) : 1; | 338 | return fn ? fn(regs, instr) : 1; |
| 339 | } | 339 | } |
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h index 15d54981674c..e3d6ccac2162 100644 --- a/arch/arm/mach-footbridge/include/mach/hardware.h +++ b/arch/arm/mach-footbridge/include/mach/hardware.h | |||
| @@ -93,7 +93,7 @@ | |||
| 93 | #define CPLD_FLASH_WR_ENABLE 1 | 93 | #define CPLD_FLASH_WR_ENABLE 1 |
| 94 | 94 | ||
| 95 | #ifndef __ASSEMBLY__ | 95 | #ifndef __ASSEMBLY__ |
| 96 | extern spinlock_t nw_gpio_lock; | 96 | extern raw_spinlock_t nw_gpio_lock; |
| 97 | extern void nw_gpio_modify_op(unsigned int mask, unsigned int set); | 97 | extern void nw_gpio_modify_op(unsigned int mask, unsigned int set); |
| 98 | extern void nw_gpio_modify_io(unsigned int mask, unsigned int in); | 98 | extern void nw_gpio_modify_io(unsigned int mask, unsigned int in); |
| 99 | extern unsigned int nw_gpio_read(void); | 99 | extern unsigned int nw_gpio_read(void); |
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 06e514f372d0..5b73190feb3a 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c | |||
| @@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int val) | |||
| 68 | /* | 68 | /* |
| 69 | * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE | 69 | * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE |
| 70 | */ | 70 | */ |
| 71 | DEFINE_SPINLOCK(nw_gpio_lock); | 71 | DEFINE_RAW_SPINLOCK(nw_gpio_lock); |
| 72 | EXPORT_SYMBOL(nw_gpio_lock); | 72 | EXPORT_SYMBOL(nw_gpio_lock); |
| 73 | 73 | ||
| 74 | static unsigned int current_gpio_op; | 74 | static unsigned int current_gpio_op; |
| @@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void) | |||
| 327 | /* | 327 | /* |
| 328 | * Set Group1/Group2 outputs | 328 | * Set Group1/Group2 outputs |
| 329 | */ | 329 | */ |
| 330 | spin_lock_irqsave(&nw_gpio_lock, flags); | 330 | raw_spin_lock_irqsave(&nw_gpio_lock, flags); |
| 331 | nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); | 331 | nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); |
| 332 | spin_unlock_irqrestore(&nw_gpio_lock, flags); | 332 | raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | /* | 335 | /* |
| @@ -390,9 +390,9 @@ static void __init cpld_init(void) | |||
| 390 | { | 390 | { |
| 391 | unsigned long flags; | 391 | unsigned long flags; |
| 392 | 392 | ||
| 393 | spin_lock_irqsave(&nw_gpio_lock, flags); | 393 | raw_spin_lock_irqsave(&nw_gpio_lock, flags); |
| 394 | nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); | 394 | nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); |
| 395 | spin_unlock_irqrestore(&nw_gpio_lock, flags); | 395 | raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | static unsigned char rwa_unlock[] __initdata = | 398 | static unsigned char rwa_unlock[] __initdata = |
| @@ -616,9 +616,9 @@ static int __init nw_hw_init(void) | |||
| 616 | cpld_init(); | 616 | cpld_init(); |
| 617 | rwa010_init(); | 617 | rwa010_init(); |
| 618 | 618 | ||
| 619 | spin_lock_irqsave(&nw_gpio_lock, flags); | 619 | raw_spin_lock_irqsave(&nw_gpio_lock, flags); |
| 620 | nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); | 620 | nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); |
| 621 | spin_unlock_irqrestore(&nw_gpio_lock, flags); | 621 | raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); |
| 622 | } | 622 | } |
| 623 | return 0; | 623 | return 0; |
| 624 | } | 624 | } |
diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c index 00269fe0be8a..e57102e871fc 100644 --- a/arch/arm/mach-footbridge/netwinder-leds.c +++ b/arch/arm/mach-footbridge/netwinder-leds.c | |||
| @@ -31,13 +31,13 @@ | |||
| 31 | static char led_state; | 31 | static char led_state; |
| 32 | static char hw_led_state; | 32 | static char hw_led_state; |
| 33 | 33 | ||
| 34 | static DEFINE_SPINLOCK(leds_lock); | 34 | static DEFINE_RAW_SPINLOCK(leds_lock); |
| 35 | 35 | ||
| 36 | static void netwinder_leds_event(led_event_t evt) | 36 | static void netwinder_leds_event(led_event_t evt) |
| 37 | { | 37 | { |
| 38 | unsigned long flags; | 38 | unsigned long flags; |
| 39 | 39 | ||
| 40 | spin_lock_irqsave(&leds_lock, flags); | 40 | raw_spin_lock_irqsave(&leds_lock, flags); |
| 41 | 41 | ||
| 42 | switch (evt) { | 42 | switch (evt) { |
| 43 | case led_start: | 43 | case led_start: |
| @@ -117,12 +117,12 @@ static void netwinder_leds_event(led_event_t evt) | |||
| 117 | break; | 117 | break; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | spin_unlock_irqrestore(&leds_lock, flags); | 120 | raw_spin_unlock_irqrestore(&leds_lock, flags); |
| 121 | 121 | ||
| 122 | if (led_state & LED_STATE_ENABLED) { | 122 | if (led_state & LED_STATE_ENABLED) { |
| 123 | spin_lock_irqsave(&nw_gpio_lock, flags); | 123 | raw_spin_lock_irqsave(&nw_gpio_lock, flags); |
| 124 | nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state); | 124 | nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state); |
| 125 | spin_unlock_irqrestore(&nw_gpio_lock, flags); | 125 | raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); |
| 126 | } | 126 | } |
| 127 | } | 127 | } |
| 128 | 128 | ||
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 77315b995681..0c20cf638575 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c | |||
| @@ -205,7 +205,7 @@ static struct amba_pl010_data integrator_uart_data = { | |||
| 205 | 205 | ||
| 206 | #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL) | 206 | #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL) |
| 207 | 207 | ||
| 208 | static DEFINE_SPINLOCK(cm_lock); | 208 | static DEFINE_RAW_SPINLOCK(cm_lock); |
| 209 | 209 | ||
| 210 | /** | 210 | /** |
| 211 | * cm_control - update the CM_CTRL register. | 211 | * cm_control - update the CM_CTRL register. |
| @@ -217,10 +217,10 @@ void cm_control(u32 mask, u32 set) | |||
| 217 | unsigned long flags; | 217 | unsigned long flags; |
| 218 | u32 val; | 218 | u32 val; |
| 219 | 219 | ||
| 220 | spin_lock_irqsave(&cm_lock, flags); | 220 | raw_spin_lock_irqsave(&cm_lock, flags); |
| 221 | val = readl(CM_CTRL) & ~mask; | 221 | val = readl(CM_CTRL) & ~mask; |
| 222 | writel(val | set, CM_CTRL); | 222 | writel(val | set, CM_CTRL); |
| 223 | spin_unlock_irqrestore(&cm_lock, flags); | 223 | raw_spin_unlock_irqrestore(&cm_lock, flags); |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | EXPORT_SYMBOL(cm_control); | 226 | EXPORT_SYMBOL(cm_control); |
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index dd56bfb351e3..56c30622d239 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c | |||
| @@ -164,7 +164,7 @@ | |||
| 164 | * 7:2 register number | 164 | * 7:2 register number |
| 165 | * | 165 | * |
| 166 | */ | 166 | */ |
| 167 | static DEFINE_SPINLOCK(v3_lock); | 167 | static DEFINE_RAW_SPINLOCK(v3_lock); |
| 168 | 168 | ||
| 169 | #define PCI_BUS_NONMEM_START 0x00000000 | 169 | #define PCI_BUS_NONMEM_START 0x00000000 |
| 170 | #define PCI_BUS_NONMEM_SIZE SZ_256M | 170 | #define PCI_BUS_NONMEM_SIZE SZ_256M |
| @@ -285,7 +285,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, | |||
| 285 | unsigned long flags; | 285 | unsigned long flags; |
| 286 | u32 v; | 286 | u32 v; |
| 287 | 287 | ||
| 288 | spin_lock_irqsave(&v3_lock, flags); | 288 | raw_spin_lock_irqsave(&v3_lock, flags); |
| 289 | addr = v3_open_config_window(bus, devfn, where); | 289 | addr = v3_open_config_window(bus, devfn, where); |
| 290 | 290 | ||
| 291 | switch (size) { | 291 | switch (size) { |
| @@ -303,7 +303,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, | |||
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | v3_close_config_window(); | 305 | v3_close_config_window(); |
| 306 | spin_unlock_irqrestore(&v3_lock, flags); | 306 | raw_spin_unlock_irqrestore(&v3_lock, flags); |
| 307 | 307 | ||
| 308 | *val = v; | 308 | *val = v; |
| 309 | return PCIBIOS_SUCCESSFUL; | 309 | return PCIBIOS_SUCCESSFUL; |
| @@ -315,7 +315,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, | |||
| 315 | unsigned long addr; | 315 | unsigned long addr; |
| 316 | unsigned long flags; | 316 | unsigned long flags; |
| 317 | 317 | ||
| 318 | spin_lock_irqsave(&v3_lock, flags); | 318 | raw_spin_lock_irqsave(&v3_lock, flags); |
| 319 | addr = v3_open_config_window(bus, devfn, where); | 319 | addr = v3_open_config_window(bus, devfn, where); |
| 320 | 320 | ||
| 321 | switch (size) { | 321 | switch (size) { |
| @@ -336,7 +336,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, | |||
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | v3_close_config_window(); | 338 | v3_close_config_window(); |
| 339 | spin_unlock_irqrestore(&v3_lock, flags); | 339 | raw_spin_unlock_irqrestore(&v3_lock, flags); |
| 340 | 340 | ||
| 341 | return PCIBIOS_SUCCESSFUL; | 341 | return PCIBIOS_SUCCESSFUL; |
| 342 | } | 342 | } |
| @@ -515,7 +515,7 @@ void __init pci_v3_preinit(void) | |||
| 515 | hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); | 515 | hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); |
| 516 | hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); | 516 | hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); |
| 517 | 517 | ||
| 518 | spin_lock_irqsave(&v3_lock, flags); | 518 | raw_spin_lock_irqsave(&v3_lock, flags); |
| 519 | 519 | ||
| 520 | /* | 520 | /* |
| 521 | * Unlock V3 registers, but only if they were previously locked. | 521 | * Unlock V3 registers, but only if they were previously locked. |
| @@ -588,7 +588,7 @@ void __init pci_v3_preinit(void) | |||
| 588 | printk(KERN_ERR "PCI: unable to grab PCI error " | 588 | printk(KERN_ERR "PCI: unable to grab PCI error " |
| 589 | "interrupt: %d\n", ret); | 589 | "interrupt: %d\n", ret); |
| 590 | 590 | ||
| 591 | spin_unlock_irqrestore(&v3_lock, flags); | 591 | raw_spin_unlock_irqrestore(&v3_lock, flags); |
| 592 | } | 592 | } |
| 593 | 593 | ||
| 594 | void __init pci_v3_postinit(void) | 594 | void __init pci_v3_postinit(void) |
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 2131832ee6ba..1b5fe8705757 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
| @@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0; | |||
| 54 | * these transactions are atomic or we will end up | 54 | * these transactions are atomic or we will end up |
| 55 | * with corrupt data on the bus or in a driver. | 55 | * with corrupt data on the bus or in a driver. |
| 56 | */ | 56 | */ |
| 57 | static DEFINE_SPINLOCK(ixp4xx_pci_lock); | 57 | static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock); |
| 58 | 58 | ||
| 59 | /* | 59 | /* |
| 60 | * Read from PCI config space | 60 | * Read from PCI config space |
| @@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock); | |||
| 62 | static void crp_read(u32 ad_cbe, u32 *data) | 62 | static void crp_read(u32 ad_cbe, u32 *data) |
| 63 | { | 63 | { |
| 64 | unsigned long flags; | 64 | unsigned long flags; |
| 65 | spin_lock_irqsave(&ixp4xx_pci_lock, flags); | 65 | raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); |
| 66 | *PCI_CRP_AD_CBE = ad_cbe; | 66 | *PCI_CRP_AD_CBE = ad_cbe; |
| 67 | *data = *PCI_CRP_RDATA; | 67 | *data = *PCI_CRP_RDATA; |
| 68 | spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); | 68 | raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | /* | 71 | /* |
| @@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *data) | |||
| 74 | static void crp_write(u32 ad_cbe, u32 data) | 74 | static void crp_write(u32 ad_cbe, u32 data) |
| 75 | { | 75 | { |
| 76 | unsigned long flags; | 76 | unsigned long flags; |
| 77 | spin_lock_irqsave(&ixp4xx_pci_lock, flags); | 77 | raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); |
| 78 | *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; | 78 | *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; |
| 79 | *PCI_CRP_WDATA = data; | 79 | *PCI_CRP_WDATA = data; |
| 80 | spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); | 80 | raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static inline int check_master_abort(void) | 83 | static inline int check_master_abort(void) |
| @@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) | |||
| 101 | int retval = 0; | 101 | int retval = 0; |
| 102 | int i; | 102 | int i; |
| 103 | 103 | ||
| 104 | spin_lock_irqsave(&ixp4xx_pci_lock, flags); | 104 | raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); |
| 105 | 105 | ||
| 106 | *PCI_NP_AD = addr; | 106 | *PCI_NP_AD = addr; |
| 107 | 107 | ||
| @@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) | |||
| 118 | if(check_master_abort()) | 118 | if(check_master_abort()) |
| 119 | retval = 1; | 119 | retval = 1; |
| 120 | 120 | ||
| 121 | spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); | 121 | raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); |
| 122 | return retval; | 122 | return retval; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| @@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) | |||
| 127 | unsigned long flags; | 127 | unsigned long flags; |
| 128 | int retval = 0; | 128 | int retval = 0; |
| 129 | 129 | ||
| 130 | spin_lock_irqsave(&ixp4xx_pci_lock, flags); | 130 | raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); |
| 131 | 131 | ||
| 132 | *PCI_NP_AD = addr; | 132 | *PCI_NP_AD = addr; |
| 133 | 133 | ||
| @@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) | |||
| 140 | if(check_master_abort()) | 140 | if(check_master_abort()) |
| 141 | retval = 1; | 141 | retval = 1; |
| 142 | 142 | ||
| 143 | spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); | 143 | raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); |
| 144 | return retval; | 144 | return retval; |
| 145 | } | 145 | } |
| 146 | 146 | ||
| @@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) | |||
| 149 | unsigned long flags; | 149 | unsigned long flags; |
| 150 | int retval = 0; | 150 | int retval = 0; |
| 151 | 151 | ||
| 152 | spin_lock_irqsave(&ixp4xx_pci_lock, flags); | 152 | raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); |
| 153 | 153 | ||
| 154 | *PCI_NP_AD = addr; | 154 | *PCI_NP_AD = addr; |
| 155 | 155 | ||
| @@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) | |||
| 162 | if(check_master_abort()) | 162 | if(check_master_abort()) |
| 163 | retval = 1; | 163 | retval = 1; |
| 164 | 164 | ||
| 165 | spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); | 165 | raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); |
| 166 | return retval; | 166 | return retval; |
| 167 | } | 167 | } |
| 168 | 168 | ||
diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c index c9e32de4adf9..ccd49189bbd0 100644 --- a/arch/arm/mach-shark/leds.c +++ b/arch/arm/mach-shark/leds.c | |||
| @@ -36,7 +36,7 @@ static char led_state; | |||
| 36 | static short hw_led_state; | 36 | static short hw_led_state; |
| 37 | static short saved_state; | 37 | static short saved_state; |
| 38 | 38 | ||
| 39 | static DEFINE_SPINLOCK(leds_lock); | 39 | static DEFINE_RAW_SPINLOCK(leds_lock); |
| 40 | 40 | ||
| 41 | short sequoia_read(int addr) { | 41 | short sequoia_read(int addr) { |
| 42 | outw(addr,0x24); | 42 | outw(addr,0x24); |
| @@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event_t evt) | |||
| 52 | { | 52 | { |
| 53 | unsigned long flags; | 53 | unsigned long flags; |
| 54 | 54 | ||
| 55 | spin_lock_irqsave(&leds_lock, flags); | 55 | raw_spin_lock_irqsave(&leds_lock, flags); |
| 56 | 56 | ||
| 57 | hw_led_state = sequoia_read(0x09); | 57 | hw_led_state = sequoia_read(0x09); |
| 58 | 58 | ||
| @@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event_t evt) | |||
| 144 | if (led_state & LED_STATE_ENABLED) | 144 | if (led_state & LED_STATE_ENABLED) |
| 145 | sequoia_write(hw_led_state,0x09); | 145 | sequoia_write(hw_led_state,0x09); |
| 146 | 146 | ||
| 147 | spin_unlock_irqrestore(&leds_lock, flags); | 147 | raw_spin_unlock_irqrestore(&leds_lock, flags); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | static int __init leds_init(void) | 150 | static int __init leds_init(void) |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 9ecfdb511951..3255c51e3e35 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #define CACHE_LINE_SIZE 32 | 26 | #define CACHE_LINE_SIZE 32 |
| 27 | 27 | ||
| 28 | static void __iomem *l2x0_base; | 28 | static void __iomem *l2x0_base; |
| 29 | static DEFINE_SPINLOCK(l2x0_lock); | 29 | static DEFINE_RAW_SPINLOCK(l2x0_lock); |
| 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ | 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
| 31 | static uint32_t l2x0_size; | 31 | static uint32_t l2x0_size; |
| 32 | 32 | ||
| @@ -115,9 +115,9 @@ static void l2x0_cache_sync(void) | |||
| 115 | { | 115 | { |
| 116 | unsigned long flags; | 116 | unsigned long flags; |
| 117 | 117 | ||
| 118 | spin_lock_irqsave(&l2x0_lock, flags); | 118 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 119 | cache_sync(); | 119 | cache_sync(); |
| 120 | spin_unlock_irqrestore(&l2x0_lock, flags); | 120 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static void __l2x0_flush_all(void) | 123 | static void __l2x0_flush_all(void) |
| @@ -134,9 +134,9 @@ static void l2x0_flush_all(void) | |||
| 134 | unsigned long flags; | 134 | unsigned long flags; |
| 135 | 135 | ||
| 136 | /* clean all ways */ | 136 | /* clean all ways */ |
| 137 | spin_lock_irqsave(&l2x0_lock, flags); | 137 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 138 | __l2x0_flush_all(); | 138 | __l2x0_flush_all(); |
| 139 | spin_unlock_irqrestore(&l2x0_lock, flags); | 139 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | static void l2x0_clean_all(void) | 142 | static void l2x0_clean_all(void) |
| @@ -144,11 +144,11 @@ static void l2x0_clean_all(void) | |||
| 144 | unsigned long flags; | 144 | unsigned long flags; |
| 145 | 145 | ||
| 146 | /* clean all ways */ | 146 | /* clean all ways */ |
| 147 | spin_lock_irqsave(&l2x0_lock, flags); | 147 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 148 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | 148 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); |
| 149 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | 149 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); |
| 150 | cache_sync(); | 150 | cache_sync(); |
| 151 | spin_unlock_irqrestore(&l2x0_lock, flags); | 151 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static void l2x0_inv_all(void) | 154 | static void l2x0_inv_all(void) |
| @@ -156,13 +156,13 @@ static void l2x0_inv_all(void) | |||
| 156 | unsigned long flags; | 156 | unsigned long flags; |
| 157 | 157 | ||
| 158 | /* invalidate all ways */ | 158 | /* invalidate all ways */ |
| 159 | spin_lock_irqsave(&l2x0_lock, flags); | 159 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 160 | /* Invalidating when L2 is enabled is a nono */ | 160 | /* Invalidating when L2 is enabled is a nono */ |
| 161 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); | 161 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); |
| 162 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 162 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
| 163 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 163 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
| 164 | cache_sync(); | 164 | cache_sync(); |
| 165 | spin_unlock_irqrestore(&l2x0_lock, flags); | 165 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static void l2x0_inv_range(unsigned long start, unsigned long end) | 168 | static void l2x0_inv_range(unsigned long start, unsigned long end) |
| @@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
| 170 | void __iomem *base = l2x0_base; | 170 | void __iomem *base = l2x0_base; |
| 171 | unsigned long flags; | 171 | unsigned long flags; |
| 172 | 172 | ||
| 173 | spin_lock_irqsave(&l2x0_lock, flags); | 173 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 174 | if (start & (CACHE_LINE_SIZE - 1)) { | 174 | if (start & (CACHE_LINE_SIZE - 1)) { |
| 175 | start &= ~(CACHE_LINE_SIZE - 1); | 175 | start &= ~(CACHE_LINE_SIZE - 1); |
| 176 | debug_writel(0x03); | 176 | debug_writel(0x03); |
| @@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | if (blk_end < end) { | 197 | if (blk_end < end) { |
| 198 | spin_unlock_irqrestore(&l2x0_lock, flags); | 198 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 199 | spin_lock_irqsave(&l2x0_lock, flags); | 199 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 200 | } | 200 | } |
| 201 | } | 201 | } |
| 202 | cache_wait(base + L2X0_INV_LINE_PA, 1); | 202 | cache_wait(base + L2X0_INV_LINE_PA, 1); |
| 203 | cache_sync(); | 203 | cache_sync(); |
| 204 | spin_unlock_irqrestore(&l2x0_lock, flags); | 204 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | static void l2x0_clean_range(unsigned long start, unsigned long end) | 207 | static void l2x0_clean_range(unsigned long start, unsigned long end) |
| @@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
| 214 | return; | 214 | return; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | spin_lock_irqsave(&l2x0_lock, flags); | 217 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 218 | start &= ~(CACHE_LINE_SIZE - 1); | 218 | start &= ~(CACHE_LINE_SIZE - 1); |
| 219 | while (start < end) { | 219 | while (start < end) { |
| 220 | unsigned long blk_end = start + min(end - start, 4096UL); | 220 | unsigned long blk_end = start + min(end - start, 4096UL); |
| @@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | if (blk_end < end) { | 227 | if (blk_end < end) { |
| 228 | spin_unlock_irqrestore(&l2x0_lock, flags); | 228 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 229 | spin_lock_irqsave(&l2x0_lock, flags); | 229 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 230 | } | 230 | } |
| 231 | } | 231 | } |
| 232 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 232 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); |
| 233 | cache_sync(); | 233 | cache_sync(); |
| 234 | spin_unlock_irqrestore(&l2x0_lock, flags); | 234 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | static void l2x0_flush_range(unsigned long start, unsigned long end) | 237 | static void l2x0_flush_range(unsigned long start, unsigned long end) |
| @@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
| 244 | return; | 244 | return; |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | spin_lock_irqsave(&l2x0_lock, flags); | 247 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 248 | start &= ~(CACHE_LINE_SIZE - 1); | 248 | start &= ~(CACHE_LINE_SIZE - 1); |
| 249 | while (start < end) { | 249 | while (start < end) { |
| 250 | unsigned long blk_end = start + min(end - start, 4096UL); | 250 | unsigned long blk_end = start + min(end - start, 4096UL); |
| @@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
| 257 | debug_writel(0x00); | 257 | debug_writel(0x00); |
| 258 | 258 | ||
| 259 | if (blk_end < end) { | 259 | if (blk_end < end) { |
| 260 | spin_unlock_irqrestore(&l2x0_lock, flags); | 260 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 261 | spin_lock_irqsave(&l2x0_lock, flags); | 261 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 262 | } | 262 | } |
| 263 | } | 263 | } |
| 264 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 264 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); |
| 265 | cache_sync(); | 265 | cache_sync(); |
| 266 | spin_unlock_irqrestore(&l2x0_lock, flags); | 266 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | static void l2x0_disable(void) | 269 | static void l2x0_disable(void) |
| 270 | { | 270 | { |
| 271 | unsigned long flags; | 271 | unsigned long flags; |
| 272 | 272 | ||
| 273 | spin_lock_irqsave(&l2x0_lock, flags); | 273 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
| 274 | __l2x0_flush_all(); | 274 | __l2x0_flush_all(); |
| 275 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | 275 | writel_relaxed(0, l2x0_base + L2X0_CTRL); |
| 276 | dsb(); | 276 | dsb(); |
| 277 | spin_unlock_irqrestore(&l2x0_lock, flags); | 277 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | static void __init l2x0_unlock(__u32 cache_id) | 280 | static void __init l2x0_unlock(__u32 cache_id) |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index b0ee9ba3cfab..93aac068da94 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
| 17 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
| 18 | 18 | ||
| 19 | static DEFINE_SPINLOCK(cpu_asid_lock); | 19 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
| 20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | 20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; |
| 21 | #ifdef CONFIG_SMP | 21 | #ifdef CONFIG_SMP |
| 22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | 22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); |
| @@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm); | |||
| 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
| 32 | { | 32 | { |
| 33 | mm->context.id = 0; | 33 | mm->context.id = 0; |
| 34 | spin_lock_init(&mm->context.id_lock); | 34 | raw_spin_lock_init(&mm->context.id_lock); |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static void flush_context(void) | 37 | static void flush_context(void) |
| @@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid) | |||
| 58 | * the broadcast. This function is also called via IPI so the | 58 | * the broadcast. This function is also called via IPI so the |
| 59 | * mm->context.id_lock has to be IRQ-safe. | 59 | * mm->context.id_lock has to be IRQ-safe. |
| 60 | */ | 60 | */ |
| 61 | spin_lock_irqsave(&mm->context.id_lock, flags); | 61 | raw_spin_lock_irqsave(&mm->context.id_lock, flags); |
| 62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | 62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { |
| 63 | /* | 63 | /* |
| 64 | * Old version of ASID found. Set the new one and | 64 | * Old version of ASID found. Set the new one and |
| @@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid) | |||
| 67 | mm->context.id = asid; | 67 | mm->context.id = asid; |
| 68 | cpumask_clear(mm_cpumask(mm)); | 68 | cpumask_clear(mm_cpumask(mm)); |
| 69 | } | 69 | } |
| 70 | spin_unlock_irqrestore(&mm->context.id_lock, flags); | 70 | raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); |
| 71 | 71 | ||
| 72 | /* | 72 | /* |
| 73 | * Set the mm_cpumask(mm) bit for the current CPU. | 73 | * Set the mm_cpumask(mm) bit for the current CPU. |
| @@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm) | |||
| 117 | { | 117 | { |
| 118 | unsigned int asid; | 118 | unsigned int asid; |
| 119 | 119 | ||
| 120 | spin_lock(&cpu_asid_lock); | 120 | raw_spin_lock(&cpu_asid_lock); |
| 121 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
| 122 | /* | 122 | /* |
| 123 | * Check the ASID again, in case the change was broadcast from | 123 | * Check the ASID again, in case the change was broadcast from |
| @@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm) | |||
| 125 | */ | 125 | */ |
| 126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | 126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { |
| 127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | 127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
| 128 | spin_unlock(&cpu_asid_lock); | 128 | raw_spin_unlock(&cpu_asid_lock); |
| 129 | return; | 129 | return; |
| 130 | } | 130 | } |
| 131 | #endif | 131 | #endif |
| @@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm) | |||
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | set_mm_context(mm, asid); | 155 | set_mm_context(mm, asid); |
| 156 | spin_unlock(&cpu_asid_lock); | 156 | raw_spin_unlock(&cpu_asid_lock); |
| 157 | } | 157 | } |
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index b8061519ce77..7d0a8c230342 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 30 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
| 31 | L_PTE_MT_MINICACHE) | 31 | L_PTE_MT_MINICACHE) |
| 32 | 32 | ||
| 33 | static DEFINE_SPINLOCK(minicache_lock); | 33 | static DEFINE_RAW_SPINLOCK(minicache_lock); |
| 34 | 34 | ||
| 35 | /* | 35 | /* |
| 36 | * ARMv4 mini-dcache optimised copy_user_highpage | 36 | * ARMv4 mini-dcache optimised copy_user_highpage |
| @@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
| 76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
| 77 | __flush_dcache_page(page_mapping(from), from); | 77 | __flush_dcache_page(page_mapping(from), from); |
| 78 | 78 | ||
| 79 | spin_lock(&minicache_lock); | 79 | raw_spin_lock(&minicache_lock); |
| 80 | 80 | ||
| 81 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); | 81 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); |
| 82 | flush_tlb_kernel_page(0xffff8000); | 82 | flush_tlb_kernel_page(0xffff8000); |
| 83 | 83 | ||
| 84 | mc_copy_user_page((void *)0xffff8000, kto); | 84 | mc_copy_user_page((void *)0xffff8000, kto); |
| 85 | 85 | ||
| 86 | spin_unlock(&minicache_lock); | 86 | raw_spin_unlock(&minicache_lock); |
| 87 | 87 | ||
| 88 | kunmap_atomic(kto, KM_USER1); | 88 | kunmap_atomic(kto, KM_USER1); |
| 89 | } | 89 | } |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 63cca0097130..3d9a1552cef6 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #define from_address (0xffff8000) | 27 | #define from_address (0xffff8000) |
| 28 | #define to_address (0xffffc000) | 28 | #define to_address (0xffffc000) |
| 29 | 29 | ||
| 30 | static DEFINE_SPINLOCK(v6_lock); | 30 | static DEFINE_RAW_SPINLOCK(v6_lock); |
| 31 | 31 | ||
| 32 | /* | 32 | /* |
| 33 | * Copy the user page. No aliasing to deal with so we can just | 33 | * Copy the user page. No aliasing to deal with so we can just |
| @@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
| 88 | * Now copy the page using the same cache colour as the | 88 | * Now copy the page using the same cache colour as the |
| 89 | * pages ultimate destination. | 89 | * pages ultimate destination. |
| 90 | */ | 90 | */ |
| 91 | spin_lock(&v6_lock); | 91 | raw_spin_lock(&v6_lock); |
| 92 | 92 | ||
| 93 | set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); | 93 | set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); |
| 94 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); | 94 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); |
| @@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
| 101 | 101 | ||
| 102 | copy_page((void *)kto, (void *)kfrom); | 102 | copy_page((void *)kto, (void *)kfrom); |
| 103 | 103 | ||
| 104 | spin_unlock(&v6_lock); | 104 | raw_spin_unlock(&v6_lock); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | /* | 107 | /* |
| @@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad | |||
| 121 | * Now clear the page using the same cache colour as | 121 | * Now clear the page using the same cache colour as |
| 122 | * the pages ultimate destination. | 122 | * the pages ultimate destination. |
| 123 | */ | 123 | */ |
| 124 | spin_lock(&v6_lock); | 124 | raw_spin_lock(&v6_lock); |
| 125 | 125 | ||
| 126 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); | 126 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); |
| 127 | flush_tlb_kernel_page(to); | 127 | flush_tlb_kernel_page(to); |
| 128 | clear_page((void *)to); | 128 | clear_page((void *)to); |
| 129 | 129 | ||
| 130 | spin_unlock(&v6_lock); | 130 | raw_spin_unlock(&v6_lock); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | struct cpu_user_fns v6_user_fns __initdata = { | 133 | struct cpu_user_fns v6_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 649bbcd325bf..610c24ced310 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 32 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
| 33 | L_PTE_MT_MINICACHE) | 33 | L_PTE_MT_MINICACHE) |
| 34 | 34 | ||
| 35 | static DEFINE_SPINLOCK(minicache_lock); | 35 | static DEFINE_RAW_SPINLOCK(minicache_lock); |
| 36 | 36 | ||
| 37 | /* | 37 | /* |
| 38 | * XScale mini-dcache optimised copy_user_highpage | 38 | * XScale mini-dcache optimised copy_user_highpage |
| @@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
| 98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
| 99 | __flush_dcache_page(page_mapping(from), from); | 99 | __flush_dcache_page(page_mapping(from), from); |
| 100 | 100 | ||
| 101 | spin_lock(&minicache_lock); | 101 | raw_spin_lock(&minicache_lock); |
| 102 | 102 | ||
| 103 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); | 103 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); |
| 104 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); | 104 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); |
| 105 | 105 | ||
| 106 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); | 106 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); |
| 107 | 107 | ||
| 108 | spin_unlock(&minicache_lock); | 108 | raw_spin_unlock(&minicache_lock); |
| 109 | 109 | ||
| 110 | kunmap_atomic(kto, KM_USER1); | 110 | kunmap_atomic(kto, KM_USER1); |
| 111 | } | 111 | } |
