61 files changed, 670 insertions(+), 573 deletions(-)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 666b278e56d7..bdbb3f74f0fe 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -33,7 +33,7 @@
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>

-static DEFINE_SPINLOCK(irq_controller_lock);
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);

 /* Address of GIC 0 CPU interface */
 void __iomem *gic_cpu_base_addr __read_mostly;
@@ -82,30 +82,30 @@ static void gic_mask_irq(struct irq_data *d)
 {
         u32 mask = 1 << (d->irq % 32);

-        spin_lock(&irq_controller_lock);
+        raw_spin_lock(&irq_controller_lock);
         writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
         if (gic_arch_extn.irq_mask)
                 gic_arch_extn.irq_mask(d);
-        spin_unlock(&irq_controller_lock);
+        raw_spin_unlock(&irq_controller_lock);
 }

 static void gic_unmask_irq(struct irq_data *d)
 {
         u32 mask = 1 << (d->irq % 32);

-        spin_lock(&irq_controller_lock);
+        raw_spin_lock(&irq_controller_lock);
         if (gic_arch_extn.irq_unmask)
                 gic_arch_extn.irq_unmask(d);
         writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
-        spin_unlock(&irq_controller_lock);
+        raw_spin_unlock(&irq_controller_lock);
 }

 static void gic_eoi_irq(struct irq_data *d)
 {
         if (gic_arch_extn.irq_eoi) {
-                spin_lock(&irq_controller_lock);
+                raw_spin_lock(&irq_controller_lock);
                 gic_arch_extn.irq_eoi(d);
-                spin_unlock(&irq_controller_lock);
+                raw_spin_unlock(&irq_controller_lock);
         }

         writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
@@ -129,7 +129,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
         if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                 return -EINVAL;

-        spin_lock(&irq_controller_lock);
+        raw_spin_lock(&irq_controller_lock);

         if (gic_arch_extn.irq_set_type)
                 gic_arch_extn.irq_set_type(d, type);
@@ -154,7 +154,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
         if (enabled)
                 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

-        spin_unlock(&irq_controller_lock);
+        raw_spin_unlock(&irq_controller_lock);

         return 0;
 }
@@ -182,10 +182,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
         mask = 0xff << shift;
         bit = 1 << (cpu_logical_map(cpu) + shift);

-        spin_lock(&irq_controller_lock);
+        raw_spin_lock(&irq_controller_lock);
         val = readl_relaxed(reg) & ~mask;
         writel_relaxed(val | bit, reg);
-        spin_unlock(&irq_controller_lock);
+        raw_spin_unlock(&irq_controller_lock);

         return IRQ_SET_MASK_OK;
 }
@@ -215,9 +215,9 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)

         chained_irq_enter(chip, desc);

-        spin_lock(&irq_controller_lock);
+        raw_spin_lock(&irq_controller_lock);
         status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
-        spin_unlock(&irq_controller_lock);
+        raw_spin_unlock(&irq_controller_lock);

         gic_irq = (status & 0x3ff);
         if (gic_irq == 1023)
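Note on the pattern above, which repeats throughout this series: on PREEMPT_RT kernels, spinlock_t becomes a sleeping lock, which must not be taken from contexts that cannot sleep, such as irqchip callbacks running with interrupts hard-disabled. raw_spinlock_t keeps the traditional busy-wait semantics. A minimal sketch of the conversion, with hypothetical names and a hypothetical register offset (not taken from this patch):

#include <linux/io.h>
#include <linux/spinlock.h>

/*
 * On PREEMPT_RT, spinlock_t is a sleeping rtmutex; raw_spinlock_t still
 * busy-waits with preemption disabled, so it remains safe in irqchip
 * callbacks that run with interrupts hard-disabled.
 */
static DEFINE_RAW_SPINLOCK(chip_lock);

static void chip_mask_hwirq(void __iomem *dist_base, unsigned int hwirq)
{
        raw_spin_lock(&chip_lock);      /* caller already runs with IRQs off */
        writel_relaxed(1 << (hwirq % 32),
                       dist_base + 0x180 + (hwirq / 32) * 4); /* hypothetical enable-clear register */
        raw_spin_unlock(&chip_lock);
}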
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 628670e9d7c9..69a5b0b6455c 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -34,18 +34,18 @@
 #define DMA_MODE_CASCADE 0xc0
 #define DMA_AUTOINIT 0x10

-extern spinlock_t dma_spin_lock;
+extern raw_spinlock_t dma_spin_lock;

 static inline unsigned long claim_dma_lock(void)
 {
         unsigned long flags;
-        spin_lock_irqsave(&dma_spin_lock, flags);
+        raw_spin_lock_irqsave(&dma_spin_lock, flags);
         return flags;
 }

 static inline void release_dma_lock(unsigned long flags)
 {
-        spin_unlock_irqrestore(&dma_spin_lock, flags);
+        raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
 }

 /* Clear the 'DMA Pointer Flip Flop'.
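For context, claim_dma_lock()/release_dma_lock() bracket every multi-register ISA DMA programming sequence. A hedged sketch of a typical caller follows; the helpers are the standard ISA DMA API, but the function and parameter names here are hypothetical and exact prototypes vary by architecture:

#include <asm/dma.h>

/* Hypothetical driver path: the mode, address, count and flip-flop
 * registers are shared per controller, so the whole sequence must be
 * programmed atomically under the (now raw) dma_spin_lock. */
static void program_isa_dma(unsigned int chan, unsigned long phys, unsigned int len)
{
        unsigned long flags = claim_dma_lock();  /* raw_spin_lock_irqsave */

        clear_dma_ff(chan);
        set_dma_mode(chan, DMA_MODE_READ);
        set_dma_addr(chan, phys);
        set_dma_count(chan, len);
        enable_dma(chan);

        release_dma_lock(flags);                 /* raw_spin_unlock_irqrestore */
}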
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b4ffe9d5b526..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,7 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
         unsigned int id;
-        spinlock_t id_lock;
+        raw_spinlock_t id_lock;
 #endif
         unsigned int kvm_seq;
 } mm_context_t;
@@ -16,7 +16,7 @@ typedef struct {

 /* init_mm.context.id_lock should be initialized. */
 #define INIT_MM_CONTEXT(name) \
-        .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
+        .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
 #else
 #define ASID(mm) (0)
 #endif
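The INIT_MM_CONTEXT() change matters because init_mm is statically initialized: a lock embedded in a static object needs the matching compile-time initializer. A small sketch of both initialization styles, with hypothetical names:

#include <linux/spinlock.h>

struct demo_ctx {
        unsigned int id;
        raw_spinlock_t id_lock;
};

/* Static instance: use the compile-time raw initializer ... */
static struct demo_ctx boot_ctx = {
        .id_lock = __RAW_SPIN_LOCK_UNLOCKED(boot_ctx.id_lock),
};

/* ... dynamic instances: initialize at runtime instead, as
 * __init_new_context() does in arch/arm/mm/context.c below. */
static void demo_ctx_init(struct demo_ctx *ctx)
{
        ctx->id = 0;
        raw_spin_lock_init(&ctx->id_lock);
}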
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 2c4a185f92cd..7b829d9663b1 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -23,7 +23,7 @@

 #include <asm/mach/dma.h>

-DEFINE_SPINLOCK(dma_spin_lock);
+DEFINE_RAW_SPINLOCK(dma_spin_lock);
 EXPORT_SYMBOL(dma_spin_lock);

 static dma_t *dma_chan[MAX_DMA_CHANNELS];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 854ce33715f4..94f34a6c8610 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -566,7 +566,7 @@ static void percpu_timer_stop(void)
 }
 #endif

-static DEFINE_SPINLOCK(stop_lock);
+static DEFINE_RAW_SPINLOCK(stop_lock);

 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
@@ -575,10 +575,10 @@ static void ipi_cpu_stop(unsigned int cpu)
 {
         if (system_state == SYSTEM_BOOTING ||
             system_state == SYSTEM_RUNNING) {
-                spin_lock(&stop_lock);
+                raw_spin_lock(&stop_lock);
                 printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                 dump_stack();
-                spin_unlock(&stop_lock);
+                raw_spin_unlock(&stop_lock);
         }

         set_cpu_online(cpu, false);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7f5b99eb2c50..99a572702509 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -257,7 +257,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
         return ret;
 }

-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);

 /*
  * This function is protected against re-entrancy.
@@ -269,7 +269,7 @@ void die(const char *str, struct pt_regs *regs, int err)

         oops_enter();

-        spin_lock_irq(&die_lock);
+        raw_spin_lock_irq(&die_lock);
         console_verbose();
         bust_spinlocks(1);
         if (!user_mode(regs))
@@ -281,7 +281,7 @@ void die(const char *str, struct pt_regs *regs, int err)

         bust_spinlocks(0);
         add_taint(TAINT_DIE);
-        spin_unlock_irq(&die_lock);
+        raw_spin_unlock_irq(&die_lock);
         oops_exit();

         if (in_interrupt())
@@ -324,24 +324,24 @@ int is_valid_bugaddr(unsigned long pc)
 #endif

 static LIST_HEAD(undef_hook);
-static DEFINE_SPINLOCK(undef_lock);
+static DEFINE_RAW_SPINLOCK(undef_lock);

 void register_undef_hook(struct undef_hook *hook)
 {
         unsigned long flags;

-        spin_lock_irqsave(&undef_lock, flags);
+        raw_spin_lock_irqsave(&undef_lock, flags);
         list_add(&hook->node, &undef_hook);
-        spin_unlock_irqrestore(&undef_lock, flags);
+        raw_spin_unlock_irqrestore(&undef_lock, flags);
 }

 void unregister_undef_hook(struct undef_hook *hook)
 {
         unsigned long flags;

-        spin_lock_irqsave(&undef_lock, flags);
+        raw_spin_lock_irqsave(&undef_lock, flags);
         list_del(&hook->node);
-        spin_unlock_irqrestore(&undef_lock, flags);
+        raw_spin_unlock_irqrestore(&undef_lock, flags);
 }

 static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
@@ -350,12 +350,12 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
         unsigned long flags;
         int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

-        spin_lock_irqsave(&undef_lock, flags);
+        raw_spin_lock_irqsave(&undef_lock, flags);
         list_for_each_entry(hook, &undef_hook, node)
                 if ((instr & hook->instr_mask) == hook->instr_val &&
                     (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                         fn = hook->fn;
-        spin_unlock_irqrestore(&undef_lock, flags);
+        raw_spin_unlock_irqrestore(&undef_lock, flags);

         return fn ? fn(regs, instr) : 1;
 }
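For reference, undef_lock protects the hook list walked from the undefined-instruction exception path, which is why it must stay a raw lock on RT. A hypothetical hook registration, matching the struct undef_hook fields used by call_undef_hook() above (the instruction encoding values here are invented for illustration):

#include <asm/ptrace.h>
#include <asm/traps.h>

/* Hypothetical handler: return 0 if the instruction was handled,
 * nonzero to let the kernel raise SIGILL as usual. */
static int demo_undef_fn(struct pt_regs *regs, unsigned int instr)
{
        regs->ARM_pc += 4;      /* pretend we emulated the instruction */
        return 0;
}

static struct undef_hook demo_hook = {
        .instr_mask     = 0x0fffffff,   /* hypothetical encoding mask */
        .instr_val      = 0x07f000f0,   /* hypothetical encoding value */
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = USR_MODE,     /* only claim user-mode faults */
        .fn             = demo_undef_fn,
};

/* register_undef_hook(&demo_hook) / unregister_undef_hook(&demo_hook)
 * now take undef_lock as a raw spinlock, safe against the exception
 * path on RT. */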
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h
index 15d54981674c..e3d6ccac2162 100644
--- a/arch/arm/mach-footbridge/include/mach/hardware.h
+++ b/arch/arm/mach-footbridge/include/mach/hardware.h
@@ -93,7 +93,7 @@
 #define CPLD_FLASH_WR_ENABLE 1

 #ifndef __ASSEMBLY__
-extern spinlock_t nw_gpio_lock;
+extern raw_spinlock_t nw_gpio_lock;
 extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
 extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
 extern unsigned int nw_gpio_read(void);
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 4cbc2e65ce3a..0f7aeff486c9 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int val)
 /*
  * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
  */
-DEFINE_SPINLOCK(nw_gpio_lock);
+DEFINE_RAW_SPINLOCK(nw_gpio_lock);
 EXPORT_SYMBOL(nw_gpio_lock);

 static unsigned int current_gpio_op;
@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
         /*
          * Set Group1/Group2 outputs
          */
-        spin_lock_irqsave(&nw_gpio_lock, flags);
+        raw_spin_lock_irqsave(&nw_gpio_lock, flags);
         nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
-        spin_unlock_irqrestore(&nw_gpio_lock, flags);
+        raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 }

 /*
@@ -390,9 +390,9 @@ static void __init cpld_init(void)
 {
         unsigned long flags;

-        spin_lock_irqsave(&nw_gpio_lock, flags);
+        raw_spin_lock_irqsave(&nw_gpio_lock, flags);
         nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
-        spin_unlock_irqrestore(&nw_gpio_lock, flags);
+        raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 }

 static unsigned char rwa_unlock[] __initdata =
@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
                 cpld_init();
                 rwa010_init();

-                spin_lock_irqsave(&nw_gpio_lock, flags);
+                raw_spin_lock_irqsave(&nw_gpio_lock, flags);
                 nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
-                spin_unlock_irqrestore(&nw_gpio_lock, flags);
+                raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
         }
         return 0;
 }
diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c
index 00269fe0be8a..e57102e871fc 100644
--- a/arch/arm/mach-footbridge/netwinder-leds.c
+++ b/arch/arm/mach-footbridge/netwinder-leds.c
@@ -31,13 +31,13 @@
 static char led_state;
 static char hw_led_state;

-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);

 static void netwinder_leds_event(led_event_t evt)
 {
         unsigned long flags;

-        spin_lock_irqsave(&leds_lock, flags);
+        raw_spin_lock_irqsave(&leds_lock, flags);

         switch (evt) {
         case led_start:
@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_event_t evt)
                 break;
         }

-        spin_unlock_irqrestore(&leds_lock, flags);
+        raw_spin_unlock_irqrestore(&leds_lock, flags);

         if (led_state & LED_STATE_ENABLED) {
-                spin_lock_irqsave(&nw_gpio_lock, flags);
+                raw_spin_lock_irqsave(&nw_gpio_lock, flags);
                 nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
-                spin_unlock_irqrestore(&nw_gpio_lock, flags);
+                raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
         }
 }

diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 82ebc8d772d3..4b38e13667ac 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -209,7 +209,7 @@ static struct amba_pl010_data integrator_uart_data = {

 #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)

-static DEFINE_SPINLOCK(cm_lock);
+static DEFINE_RAW_SPINLOCK(cm_lock);

 /**
  * cm_control - update the CM_CTRL register.
@@ -221,10 +221,10 @@ void cm_control(u32 mask, u32 set)
         unsigned long flags;
         u32 val;

-        spin_lock_irqsave(&cm_lock, flags);
+        raw_spin_lock_irqsave(&cm_lock, flags);
         val = readl(CM_CTRL) & ~mask;
         writel(val | set, CM_CTRL);
-        spin_unlock_irqrestore(&cm_lock, flags);
+        raw_spin_unlock_irqrestore(&cm_lock, flags);
 }

 EXPORT_SYMBOL(cm_control);
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 11b86e5b71c2..b4d8f8b8a085 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -163,7 +163,7 @@
  *  7:2 register number
  *
  */
-static DEFINE_SPINLOCK(v3_lock);
+static DEFINE_RAW_SPINLOCK(v3_lock);

 #define PCI_BUS_NONMEM_START 0x00000000
 #define PCI_BUS_NONMEM_SIZE SZ_256M
@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
         unsigned long flags;
         u32 v;

-        spin_lock_irqsave(&v3_lock, flags);
+        raw_spin_lock_irqsave(&v3_lock, flags);
         addr = v3_open_config_window(bus, devfn, where);

         switch (size) {
@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
         }

         v3_close_config_window();
-        spin_unlock_irqrestore(&v3_lock, flags);
+        raw_spin_unlock_irqrestore(&v3_lock, flags);

         *val = v;
         return PCIBIOS_SUCCESSFUL;
@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
         unsigned long addr;
         unsigned long flags;

-        spin_lock_irqsave(&v3_lock, flags);
+        raw_spin_lock_irqsave(&v3_lock, flags);
         addr = v3_open_config_window(bus, devfn, where);

         switch (size) {
@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
         }

         v3_close_config_window();
-        spin_unlock_irqrestore(&v3_lock, flags);
+        raw_spin_unlock_irqrestore(&v3_lock, flags);

         return PCIBIOS_SUCCESSFUL;
 }
@@ -513,7 +513,7 @@ void __init pci_v3_preinit(void)
         hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
         hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");

-        spin_lock_irqsave(&v3_lock, flags);
+        raw_spin_lock_irqsave(&v3_lock, flags);

         /*
          * Unlock V3 registers, but only if they were previously locked.
@@ -586,7 +586,7 @@ void __init pci_v3_preinit(void)
                 printk(KERN_ERR "PCI: unable to grab PCI error "
                        "interrupt: %d\n", ret);

-        spin_unlock_irqrestore(&v3_lock, flags);
+        raw_spin_unlock_irqrestore(&v3_lock, flags);
 }

 void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 85245e48099c..f72a3a893c47 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
  * these transactions are atomic or we will end up
  * with corrupt data on the bus or in a driver.
  */
-static DEFINE_SPINLOCK(ixp4xx_pci_lock);
+static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);

 /*
  * Read from PCI config space
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
 static void crp_read(u32 ad_cbe, u32 *data)
 {
         unsigned long flags;
-        spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+        raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
         *PCI_CRP_AD_CBE = ad_cbe;
         *data = *PCI_CRP_RDATA;
-        spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+        raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
 }

 /*
@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *data)
 static void crp_write(u32 ad_cbe, u32 data)
 {
         unsigned long flags;
-        spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+        raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
         *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
         *PCI_CRP_WDATA = data;
-        spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+        raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
 }

 static inline int check_master_abort(void)
@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
         int retval = 0;
         int i;

-        spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+        raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);

         *PCI_NP_AD = addr;

@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
         if(check_master_abort())
                 retval = 1;

-        spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+        raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
         return retval;
 }

@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
         unsigned long flags;
         int retval = 0;

-        spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+        raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);

         *PCI_NP_AD = addr;

@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
         if(check_master_abort())
                 retval = 1;

-        spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+        raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
         return retval;
 }

@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data)
         unsigned long flags;
         int retval = 0;

-        spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+        raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);

         *PCI_NP_AD = addr;

@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data)
         if(check_master_abort())
                 retval = 1;

-        spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+        raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
         return retval;
 }

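The comment at the top of this file states the requirement: each config-space access is a two-step transaction (write the address/command register, then move the data), and the lock keeps the pair atomic. A hedged sketch of the shape of such a transaction, with entirely hypothetical register names, in the same volatile-pointer style this file uses:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(cfg_lock);
static volatile u32 *demo_addr_reg, *demo_data_reg;     /* hypothetical MMIO */

/* Two-step transaction: another CPU or an interrupt sneaking in
 * between the address write and the data read would corrupt both
 * accesses, so the pair runs with IRQs off under one raw lock. */
static u32 demo_cfg_read(u32 addr)
{
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&cfg_lock, flags);
        *demo_addr_reg = addr;
        val = *demo_data_reg;
        raw_spin_unlock_irqrestore(&cfg_lock, flags);

        return val;
}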
diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c
index c9e32de4adf9..ccd49189bbd0 100644
--- a/arch/arm/mach-shark/leds.c
+++ b/arch/arm/mach-shark/leds.c
@@ -36,7 +36,7 @@ static char led_state;
 static short hw_led_state;
 static short saved_state;

-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);

 short sequoia_read(int addr) {
   outw(addr,0x24);
@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event_t evt)
 {
         unsigned long flags;

-        spin_lock_irqsave(&leds_lock, flags);
+        raw_spin_lock_irqsave(&leds_lock, flags);

         hw_led_state = sequoia_read(0x09);

@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event_t evt)
         if (led_state & LED_STATE_ENABLED)
                 sequoia_write(hw_led_state,0x09);

-        spin_unlock_irqrestore(&leds_lock, flags);
+        raw_spin_unlock_irqrestore(&leds_lock, flags);
 }

 static int __init leds_init(void)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 3f9b9980478e..8ac9e9f84790 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,7 +29,7 @@
 #define CACHE_LINE_SIZE 32

 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static uint32_t l2x0_size;

@@ -126,9 +126,9 @@ static void l2x0_cache_sync(void)
 {
         unsigned long flags;

-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void __l2x0_flush_all(void)
@@ -145,9 +145,9 @@ static void l2x0_flush_all(void)
         unsigned long flags;

         /* clean all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         __l2x0_flush_all();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_clean_all(void)
@@ -155,11 +155,11 @@ static void l2x0_clean_all(void)
         unsigned long flags;

         /* clean all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
         cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_inv_all(void)
@@ -167,13 +167,13 @@ static void l2x0_inv_all(void)
         unsigned long flags;

         /* invalidate all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         /* Invalidating when L2 is enabled is a nono */
         BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
         writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
         cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -181,7 +181,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
         void __iomem *base = l2x0_base;
         unsigned long flags;

-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         if (start & (CACHE_LINE_SIZE - 1)) {
                 start &= ~(CACHE_LINE_SIZE - 1);
                 debug_writel(0x03);
@@ -206,13 +206,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
                 }

                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_INV_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -225,7 +225,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                 return;
         }

-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
                 unsigned long blk_end = start + min(end - start, 4096UL);
@@ -236,13 +236,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                 }

                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -255,7 +255,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                 return;
         }

-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
                 unsigned long blk_end = start + min(end - start, 4096UL);
@@ -268,24 +268,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                 debug_writel(0x00);

                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_disable(void)
 {
         unsigned long flags;

-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         __l2x0_flush_all();
         writel_relaxed(0, l2x0_base + L2X0_CTRL);
         dsb();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_unlock(__u32 cache_id)
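Worth noting in the range functions above: the lock is dropped and immediately retaken between 4 KiB blocks, so the IRQs-off window stays bounded even for arbitrarily large ranges. A stripped-down sketch of that lock-break pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(range_lock);

static void process_range(unsigned long start, unsigned long end)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&range_lock, flags);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        /* touch one 32-byte cache line */
                        start += 32;
                }

                if (blk_end < end) {
                        /* lock break: give pending IRQs a chance to run */
                        raw_spin_unlock_irqrestore(&range_lock, flags);
                        raw_spin_lock_irqsave(&range_lock, flags);
                }
        }
        raw_spin_unlock_irqrestore(&range_lock, flags);
}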
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..93aac068da94 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>

-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
         mm->context.id = 0;
-        spin_lock_init(&mm->context.id_lock);
+        raw_spin_lock_init(&mm->context.id_lock);
 }

 static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
          * the broadcast. This function is also called via IPI so the
          * mm->context.id_lock has to be IRQ-safe.
          */
-        spin_lock_irqsave(&mm->context.id_lock, flags);
+        raw_spin_lock_irqsave(&mm->context.id_lock, flags);
         if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
                 /*
                  * Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
                 mm->context.id = asid;
                 cpumask_clear(mm_cpumask(mm));
         }
-        spin_unlock_irqrestore(&mm->context.id_lock, flags);
+        raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

         /*
          * Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
 {
         unsigned int asid;

-        spin_lock(&cpu_asid_lock);
+        raw_spin_lock(&cpu_asid_lock);
 #ifdef CONFIG_SMP
         /*
          * Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
          */
         if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
                 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                spin_unlock(&cpu_asid_lock);
+                raw_spin_unlock(&cpu_asid_lock);
                 return;
         }
 #endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
         }

         set_mm_context(mm, asid);
-        spin_unlock(&cpu_asid_lock);
+        raw_spin_unlock(&cpu_asid_lock);
 }
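The comment in set_mm_context() states the key constraint: the function can be invoked from an IPI, i.e. hard-IRQ context, where a sleeping lock is never allowed, on RT or otherwise. A minimal sketch of that situation, with hypothetical names:

#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned int demo_generation;

/* Runs in hard-IRQ (IPI) context on every CPU; only a raw spinlock
 * is safe to take here. */
static void demo_ipi_handler(void *info)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        demo_generation++;
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_broadcast(void)
{
        /* run the handler on all CPUs, waiting for completion */
        on_each_cpu(demo_ipi_handler, NULL, 1);
}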
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b8061519ce77..7d0a8c230342 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                   L_PTE_MT_MINICACHE)

-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);

 /*
  * ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
         if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                 __flush_dcache_page(page_mapping(from), from);

-        spin_lock(&minicache_lock);
+        raw_spin_lock(&minicache_lock);

         set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
         flush_tlb_kernel_page(0xffff8000);

         mc_copy_user_page((void *)0xffff8000, kto);

-        spin_unlock(&minicache_lock);
+        raw_spin_unlock(&minicache_lock);

         kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca0097130..3d9a1552cef6 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address (0xffff8000)
 #define to_address (0xffffc000)

-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);

 /*
  * Copy the user page. No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
          * Now copy the page using the same cache colour as the
          * pages ultimate destination.
          */
-        spin_lock(&v6_lock);
+        raw_spin_lock(&v6_lock);

         set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
         set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,

         copy_page((void *)kto, (void *)kfrom);

-        spin_unlock(&v6_lock);
+        raw_spin_unlock(&v6_lock);
 }

 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
          * Now clear the page using the same cache colour as
          * the pages ultimate destination.
          */
-        spin_lock(&v6_lock);
+        raw_spin_lock(&v6_lock);

         set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
         flush_tlb_kernel_page(to);
         clear_page((void *)to);

-        spin_unlock(&v6_lock);
+        raw_spin_unlock(&v6_lock);
 }

 struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd325bf..610c24ced310 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                   L_PTE_MT_MINICACHE)

-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);

 /*
  * XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
         if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                 __flush_dcache_page(page_mapping(from), from);

-        spin_lock(&minicache_lock);
+        raw_spin_lock(&minicache_lock);

         set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
         flush_tlb_kernel_page(COPYPAGE_MINICACHE);

         mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

-        spin_unlock(&minicache_lock);
+        raw_spin_unlock(&minicache_lock);

         kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 984cd2029158..3330feca7502 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -47,7 +47,7 @@ struct uic {
         int index;
         int dcrbase;

-        spinlock_t lock;
+        raw_spinlock_t lock;

         /* The remapper for this UIC */
         struct irq_host *irqhost;
@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_data *d)
         u32 er, sr;

         sr = 1 << (31-src);
-        spin_lock_irqsave(&uic->lock, flags);
+        raw_spin_lock_irqsave(&uic->lock, flags);
         /* ack level-triggered interrupts here */
         if (irqd_is_level_type(d))
                 mtdcr(uic->dcrbase + UIC_SR, sr);
         er = mfdcr(uic->dcrbase + UIC_ER);
         er |= sr;
         mtdcr(uic->dcrbase + UIC_ER, er);
-        spin_unlock_irqrestore(&uic->lock, flags);
+        raw_spin_unlock_irqrestore(&uic->lock, flags);
 }

 static void uic_mask_irq(struct irq_data *d)
@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data *d)
         unsigned long flags;
         u32 er;

-        spin_lock_irqsave(&uic->lock, flags);
+        raw_spin_lock_irqsave(&uic->lock, flags);
         er = mfdcr(uic->dcrbase + UIC_ER);
         er &= ~(1 << (31 - src));
         mtdcr(uic->dcrbase + UIC_ER, er);
-        spin_unlock_irqrestore(&uic->lock, flags);
+        raw_spin_unlock_irqrestore(&uic->lock, flags);
 }

 static void uic_ack_irq(struct irq_data *d)
@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data *d)
         unsigned int src = irqd_to_hwirq(d);
         unsigned long flags;

-        spin_lock_irqsave(&uic->lock, flags);
+        raw_spin_lock_irqsave(&uic->lock, flags);
         mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
-        spin_unlock_irqrestore(&uic->lock, flags);
+        raw_spin_unlock_irqrestore(&uic->lock, flags);
 }

 static void uic_mask_ack_irq(struct irq_data *d)
@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
         u32 er, sr;

         sr = 1 << (31-src);
-        spin_lock_irqsave(&uic->lock, flags);
+        raw_spin_lock_irqsave(&uic->lock, flags);
         er = mfdcr(uic->dcrbase + UIC_ER);
         er &= ~sr;
         mtdcr(uic->dcrbase + UIC_ER, er);
@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
          */
         if (!irqd_is_level_type(d))
                 mtdcr(uic->dcrbase + UIC_SR, sr);
-        spin_unlock_irqrestore(&uic->lock, flags);
+        raw_spin_unlock_irqrestore(&uic->lock, flags);
 }

 static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)

         mask = ~(1 << (31 - src));

-        spin_lock_irqsave(&uic->lock, flags);
+        raw_spin_lock_irqsave(&uic->lock, flags);
         tr = mfdcr(uic->dcrbase + UIC_TR);
         pr = mfdcr(uic->dcrbase + UIC_PR);
         tr = (tr & mask) | (trigger << (31-src));
@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
         mtdcr(uic->dcrbase + UIC_PR, pr);
         mtdcr(uic->dcrbase + UIC_TR, tr);

-        spin_unlock_irqrestore(&uic->lock, flags);
+        raw_spin_unlock_irqrestore(&uic->lock, flags);

         return 0;
 }
@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(struct device_node *node)
         if (! uic)
                 return NULL; /* FIXME: panic? */

-        spin_lock_init(&uic->lock);
+        raw_spin_lock_init(&uic->lock);
         indexp = of_get_property(node, "cell-index", &len);
         if (!indexp || (len != sizeof(u32))) {
                 printk(KERN_ERR "uic: Device node %s has missing or invalid "
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 8694ef56459d..38e49bc95ffc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
| @@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | |||
| 28 | * cmci_discover_lock protects against parallel discovery attempts | 28 | * cmci_discover_lock protects against parallel discovery attempts |
| 29 | * which could race against each other. | 29 | * which could race against each other. |
| 30 | */ | 30 | */ |
| 31 | static DEFINE_SPINLOCK(cmci_discover_lock); | 31 | static DEFINE_RAW_SPINLOCK(cmci_discover_lock); |
| 32 | 32 | ||
| 33 | #define CMCI_THRESHOLD 1 | 33 | #define CMCI_THRESHOLD 1 |
| 34 | 34 | ||
| @@ -85,7 +85,7 @@ static void cmci_discover(int banks, int boot) | |||
| 85 | int hdr = 0; | 85 | int hdr = 0; |
| 86 | int i; | 86 | int i; |
| 87 | 87 | ||
| 88 | spin_lock_irqsave(&cmci_discover_lock, flags); | 88 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
| 89 | for (i = 0; i < banks; i++) { | 89 | for (i = 0; i < banks; i++) { |
| 90 | u64 val; | 90 | u64 val; |
| 91 | 91 | ||
| @@ -116,7 +116,7 @@ static void cmci_discover(int banks, int boot) | |||
| 116 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | 116 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); |
| 117 | } | 117 | } |
| 118 | } | 118 | } |
| 119 | spin_unlock_irqrestore(&cmci_discover_lock, flags); | 119 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
| 120 | if (hdr) | 120 | if (hdr) |
| 121 | printk(KERN_CONT "\n"); | 121 | printk(KERN_CONT "\n"); |
| 122 | } | 122 | } |
| @@ -150,7 +150,7 @@ void cmci_clear(void) | |||
| 150 | 150 | ||
| 151 | if (!cmci_supported(&banks)) | 151 | if (!cmci_supported(&banks)) |
| 152 | return; | 152 | return; |
| 153 | spin_lock_irqsave(&cmci_discover_lock, flags); | 153 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
| 154 | for (i = 0; i < banks; i++) { | 154 | for (i = 0; i < banks; i++) { |
| 155 | if (!test_bit(i, __get_cpu_var(mce_banks_owned))) | 155 | if (!test_bit(i, __get_cpu_var(mce_banks_owned))) |
| 156 | continue; | 156 | continue; |
| @@ -160,7 +160,7 @@ void cmci_clear(void) | |||
| 160 | wrmsrl(MSR_IA32_MCx_CTL2(i), val); | 160 | wrmsrl(MSR_IA32_MCx_CTL2(i), val); |
| 161 | __clear_bit(i, __get_cpu_var(mce_banks_owned)); | 161 | __clear_bit(i, __get_cpu_var(mce_banks_owned)); |
| 162 | } | 162 | } |
| 163 | spin_unlock_irqrestore(&cmci_discover_lock, flags); | 163 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | /* | 166 | /* |
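
cmci_discover_lock is taken from machine-check paths that cannot block, so only a raw lock is safe there. A rough sketch of the cmci_discover() shape, with the MSR accesses elided:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_discover_lock);	/* was: DEFINE_SPINLOCK */

/* The loop body is a placeholder for the rdmsrl()/wrmsrl() ownership
 * dance in the real function. */
static void demo_discover(int banks)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&demo_discover_lock, flags);
	for (i = 0; i < banks; i++)
		;	/* probe and claim bank i */
	raw_spin_unlock_irqrestore(&demo_discover_lock, flags);
}
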
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 68894fdc034b..96646b3aeca8 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
| @@ -355,10 +355,10 @@ static void nmi_cpu_setup(void *dummy) | |||
| 355 | int cpu = smp_processor_id(); | 355 | int cpu = smp_processor_id(); |
| 356 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | 356 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); |
| 357 | nmi_cpu_save_registers(msrs); | 357 | nmi_cpu_save_registers(msrs); |
| 358 | spin_lock(&oprofilefs_lock); | 358 | raw_spin_lock(&oprofilefs_lock); |
| 359 | model->setup_ctrs(model, msrs); | 359 | model->setup_ctrs(model, msrs); |
| 360 | nmi_cpu_setup_mux(cpu, msrs); | 360 | nmi_cpu_setup_mux(cpu, msrs); |
| 361 | spin_unlock(&oprofilefs_lock); | 361 | raw_spin_unlock(&oprofilefs_lock); |
| 362 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); | 362 | per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); |
| 363 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 363 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 364 | } | 364 | } |
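
nmi_cpu_setup() runs on every CPU in a context where sleeping is not allowed, which is presumably why oprofilefs_lock has to become raw here. A sketch under that assumption, using on_each_cpu() to stand in for the driver's dispatch:

#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_setup_lock);

/* Invoked on every CPU from IPI context; the body stands in for the
 * model->setup_ctrs() work that the real handler serializes. */
static void demo_cpu_setup(void *unused)
{
	raw_spin_lock(&demo_setup_lock);
	/* program this CPU's counters from shared configuration */
	raw_spin_unlock(&demo_setup_lock);
}

static void demo_setup_all_cpus(void)
{
	on_each_cpu(demo_cpu_setup, NULL, 1);
}
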
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2e69e09ff03e..9b88f9828d8c 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
| 852 | } | 852 | } |
| 853 | 853 | ||
| 854 | static int c3_cpu_count; | 854 | static int c3_cpu_count; |
| 855 | static DEFINE_SPINLOCK(c3_lock); | 855 | static DEFINE_RAW_SPINLOCK(c3_lock); |
| 856 | 856 | ||
| 857 | /** | 857 | /** |
| 858 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 858 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
| @@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 930 | * without doing anything. | 930 | * without doing anything. |
| 931 | */ | 931 | */ |
| 932 | if (pr->flags.bm_check && pr->flags.bm_control) { | 932 | if (pr->flags.bm_check && pr->flags.bm_control) { |
| 933 | spin_lock(&c3_lock); | 933 | raw_spin_lock(&c3_lock); |
| 934 | c3_cpu_count++; | 934 | c3_cpu_count++; |
| 935 | /* Disable bus master arbitration when all CPUs are in C3 */ | 935 | /* Disable bus master arbitration when all CPUs are in C3 */ |
| 936 | if (c3_cpu_count == num_online_cpus()) | 936 | if (c3_cpu_count == num_online_cpus()) |
| 937 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); | 937 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); |
| 938 | spin_unlock(&c3_lock); | 938 | raw_spin_unlock(&c3_lock); |
| 939 | } else if (!pr->flags.bm_check) { | 939 | } else if (!pr->flags.bm_check) { |
| 940 | ACPI_FLUSH_CPU_CACHE(); | 940 | ACPI_FLUSH_CPU_CACHE(); |
| 941 | } | 941 | } |
| @@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
| 944 | 944 | ||
| 945 | /* Re-enable bus master arbitration */ | 945 | /* Re-enable bus master arbitration */ |
| 946 | if (pr->flags.bm_check && pr->flags.bm_control) { | 946 | if (pr->flags.bm_check && pr->flags.bm_control) { |
| 947 | spin_lock(&c3_lock); | 947 | raw_spin_lock(&c3_lock); |
| 948 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); | 948 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); |
| 949 | c3_cpu_count--; | 949 | c3_cpu_count--; |
| 950 | spin_unlock(&c3_lock); | 950 | raw_spin_unlock(&c3_lock); |
| 951 | } | 951 | } |
| 952 | kt2 = ktime_get_real(); | 952 | kt2 = ktime_get_real(); |
| 953 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); | 953 | idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); |
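
The c3_lock sits in the cpuidle path, which executes with interrupts disabled and can never schedule. The counting pattern the code uses -- last CPU into C3 disables bus-master arbitration, first CPU out re-enables it -- looks roughly like this sketch, with the ACPI calls stubbed:

#include <linux/cpumask.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_c3_lock);
static int demo_c3_count;

static void demo_disable_arb(void) { /* ACPI register write in reality */ }
static void demo_enable_arb(void)  { }

static void demo_enter_c3(void)
{
	raw_spin_lock(&demo_c3_lock);
	if (++demo_c3_count == num_online_cpus())
		demo_disable_arb();	/* all CPUs idle: stop arbitration */
	raw_spin_unlock(&demo_c3_lock);
}

static void demo_exit_c3(void)
{
	raw_spin_lock(&demo_c3_lock);
	demo_enable_arb();
	demo_c3_count--;
	raw_spin_unlock(&demo_c3_lock);
}
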
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 4abd089a094f..25ec0bb05198 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
| @@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION); | |||
| 35 | MODULE_LICENSE("GPL"); | 35 | MODULE_LICENSE("GPL"); |
| 36 | MODULE_AUTHOR("Intel Corporation"); | 36 | MODULE_AUTHOR("Intel Corporation"); |
| 37 | 37 | ||
| 38 | static DEFINE_SPINLOCK(dca_lock); | 38 | static DEFINE_RAW_SPINLOCK(dca_lock); |
| 39 | 39 | ||
| 40 | static LIST_HEAD(dca_domains); | 40 | static LIST_HEAD(dca_domains); |
| 41 | 41 | ||
| @@ -101,10 +101,10 @@ static void unregister_dca_providers(void) | |||
| 101 | 101 | ||
| 102 | INIT_LIST_HEAD(&unregistered_providers); | 102 | INIT_LIST_HEAD(&unregistered_providers); |
| 103 | 103 | ||
| 104 | spin_lock_irqsave(&dca_lock, flags); | 104 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 105 | 105 | ||
| 106 | if (list_empty(&dca_domains)) { | 106 | if (list_empty(&dca_domains)) { |
| 107 | spin_unlock_irqrestore(&dca_lock, flags); | 107 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 108 | return; | 108 | return; |
| 109 | } | 109 | } |
| 110 | 110 | ||
| @@ -116,7 +116,7 @@ static void unregister_dca_providers(void) | |||
| 116 | 116 | ||
| 117 | dca_free_domain(domain); | 117 | dca_free_domain(domain); |
| 118 | 118 | ||
| 119 | spin_unlock_irqrestore(&dca_lock, flags); | 119 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 120 | 120 | ||
| 121 | list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { | 121 | list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { |
| 122 | dca_sysfs_remove_provider(dca); | 122 | dca_sysfs_remove_provider(dca); |
| @@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev) | |||
| 144 | domain = dca_find_domain(rc); | 144 | domain = dca_find_domain(rc); |
| 145 | 145 | ||
| 146 | if (!domain) { | 146 | if (!domain) { |
| 147 | if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) { | 147 | if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) |
| 148 | dca_providers_blocked = 1; | 148 | dca_providers_blocked = 1; |
| 149 | } else { | ||
| 150 | domain = dca_allocate_domain(rc); | ||
| 151 | if (domain) | ||
| 152 | list_add(&domain->node, &dca_domains); | ||
| 153 | } | ||
| 154 | } | 149 | } |
| 155 | 150 | ||
| 156 | return domain; | 151 | return domain; |
| @@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev) | |||
| 198 | if (!dev) | 193 | if (!dev) |
| 199 | return -EFAULT; | 194 | return -EFAULT; |
| 200 | 195 | ||
| 201 | spin_lock_irqsave(&dca_lock, flags); | 196 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 202 | 197 | ||
| 203 | /* check if the requester has not been added already */ | 198 | /* check if the requester has not been added already */ |
| 204 | dca = dca_find_provider_by_dev(dev); | 199 | dca = dca_find_provider_by_dev(dev); |
| 205 | if (dca) { | 200 | if (dca) { |
| 206 | spin_unlock_irqrestore(&dca_lock, flags); | 201 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 207 | return -EEXIST; | 202 | return -EEXIST; |
| 208 | } | 203 | } |
| 209 | 204 | ||
| 210 | pci_rc = dca_pci_rc_from_dev(dev); | 205 | pci_rc = dca_pci_rc_from_dev(dev); |
| 211 | domain = dca_find_domain(pci_rc); | 206 | domain = dca_find_domain(pci_rc); |
| 212 | if (!domain) { | 207 | if (!domain) { |
| 213 | spin_unlock_irqrestore(&dca_lock, flags); | 208 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 214 | return -ENODEV; | 209 | return -ENODEV; |
| 215 | } | 210 | } |
| 216 | 211 | ||
| @@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev) | |||
| 220 | break; | 215 | break; |
| 221 | } | 216 | } |
| 222 | 217 | ||
| 223 | spin_unlock_irqrestore(&dca_lock, flags); | 218 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 224 | 219 | ||
| 225 | if (slot < 0) | 220 | if (slot < 0) |
| 226 | return slot; | 221 | return slot; |
| 227 | 222 | ||
| 228 | err = dca_sysfs_add_req(dca, dev, slot); | 223 | err = dca_sysfs_add_req(dca, dev, slot); |
| 229 | if (err) { | 224 | if (err) { |
| 230 | spin_lock_irqsave(&dca_lock, flags); | 225 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 231 | if (dca == dca_find_provider_by_dev(dev)) | 226 | if (dca == dca_find_provider_by_dev(dev)) |
| 232 | dca->ops->remove_requester(dca, dev); | 227 | dca->ops->remove_requester(dca, dev); |
| 233 | spin_unlock_irqrestore(&dca_lock, flags); | 228 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 234 | return err; | 229 | return err; |
| 235 | } | 230 | } |
| 236 | 231 | ||
| @@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev) | |||
| 251 | if (!dev) | 246 | if (!dev) |
| 252 | return -EFAULT; | 247 | return -EFAULT; |
| 253 | 248 | ||
| 254 | spin_lock_irqsave(&dca_lock, flags); | 249 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 255 | dca = dca_find_provider_by_dev(dev); | 250 | dca = dca_find_provider_by_dev(dev); |
| 256 | if (!dca) { | 251 | if (!dca) { |
| 257 | spin_unlock_irqrestore(&dca_lock, flags); | 252 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 258 | return -ENODEV; | 253 | return -ENODEV; |
| 259 | } | 254 | } |
| 260 | slot = dca->ops->remove_requester(dca, dev); | 255 | slot = dca->ops->remove_requester(dca, dev); |
| 261 | spin_unlock_irqrestore(&dca_lock, flags); | 256 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 262 | 257 | ||
| 263 | if (slot < 0) | 258 | if (slot < 0) |
| 264 | return slot; | 259 | return slot; |
| @@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu) | |||
| 280 | u8 tag; | 275 | u8 tag; |
| 281 | unsigned long flags; | 276 | unsigned long flags; |
| 282 | 277 | ||
| 283 | spin_lock_irqsave(&dca_lock, flags); | 278 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 284 | 279 | ||
| 285 | dca = dca_find_provider_by_dev(dev); | 280 | dca = dca_find_provider_by_dev(dev); |
| 286 | if (!dca) { | 281 | if (!dca) { |
| 287 | spin_unlock_irqrestore(&dca_lock, flags); | 282 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 288 | return -ENODEV; | 283 | return -ENODEV; |
| 289 | } | 284 | } |
| 290 | tag = dca->ops->get_tag(dca, dev, cpu); | 285 | tag = dca->ops->get_tag(dca, dev, cpu); |
| 291 | 286 | ||
| 292 | spin_unlock_irqrestore(&dca_lock, flags); | 287 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 293 | return tag; | 288 | return tag; |
| 294 | } | 289 | } |
| 295 | 290 | ||
| @@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) | |||
| 360 | { | 355 | { |
| 361 | int err; | 356 | int err; |
| 362 | unsigned long flags; | 357 | unsigned long flags; |
| 363 | struct dca_domain *domain; | 358 | struct dca_domain *domain, *newdomain = NULL; |
| 364 | 359 | ||
| 365 | spin_lock_irqsave(&dca_lock, flags); | 360 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 366 | if (dca_providers_blocked) { | 361 | if (dca_providers_blocked) { |
| 367 | spin_unlock_irqrestore(&dca_lock, flags); | 362 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 368 | return -ENODEV; | 363 | return -ENODEV; |
| 369 | } | 364 | } |
| 370 | spin_unlock_irqrestore(&dca_lock, flags); | 365 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 371 | 366 | ||
| 372 | err = dca_sysfs_add_provider(dca, dev); | 367 | err = dca_sysfs_add_provider(dca, dev); |
| 373 | if (err) | 368 | if (err) |
| 374 | return err; | 369 | return err; |
| 375 | 370 | ||
| 376 | spin_lock_irqsave(&dca_lock, flags); | 371 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 377 | domain = dca_get_domain(dev); | 372 | domain = dca_get_domain(dev); |
| 378 | if (!domain) { | 373 | if (!domain) { |
| 374 | struct pci_bus *rc; | ||
| 375 | |||
| 379 | if (dca_providers_blocked) { | 376 | if (dca_providers_blocked) { |
| 380 | spin_unlock_irqrestore(&dca_lock, flags); | 377 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 381 | dca_sysfs_remove_provider(dca); | 378 | dca_sysfs_remove_provider(dca); |
| 382 | unregister_dca_providers(); | 379 | unregister_dca_providers(); |
| 383 | } else { | 380 | return -ENODEV; |
| 384 | spin_unlock_irqrestore(&dca_lock, flags); | 381 | } |
| 382 | |||
| 383 | raw_spin_unlock_irqrestore(&dca_lock, flags); | ||
| 384 | rc = dca_pci_rc_from_dev(dev); | ||
| 385 | newdomain = dca_allocate_domain(rc); | ||
| 386 | if (!newdomain) | ||
| 387 | return -ENODEV; | ||
| 388 | raw_spin_lock_irqsave(&dca_lock, flags); | ||
| 389 | /* Recheck, we might have raced after dropping the lock */ | ||
| 390 | domain = dca_get_domain(dev); | ||
| 391 | if (!domain) { | ||
| 392 | domain = newdomain; | ||
| 393 | newdomain = NULL; | ||
| 394 | list_add(&domain->node, &dca_domains); | ||
| 385 | } | 395 | } |
| 386 | return -ENODEV; | ||
| 387 | } | 396 | } |
| 388 | list_add(&dca->node, &domain->dca_providers); | 397 | list_add(&dca->node, &domain->dca_providers); |
| 389 | spin_unlock_irqrestore(&dca_lock, flags); | 398 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 390 | 399 | ||
| 391 | blocking_notifier_call_chain(&dca_provider_chain, | 400 | blocking_notifier_call_chain(&dca_provider_chain, |
| 392 | DCA_PROVIDER_ADD, NULL); | 401 | DCA_PROVIDER_ADD, NULL); |
| 402 | kfree(newdomain); | ||
| 393 | return 0; | 403 | return 0; |
| 394 | } | 404 | } |
| 395 | EXPORT_SYMBOL_GPL(register_dca_provider); | 405 | EXPORT_SYMBOL_GPL(register_dca_provider); |
| @@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev) | |||
| 407 | blocking_notifier_call_chain(&dca_provider_chain, | 417 | blocking_notifier_call_chain(&dca_provider_chain, |
| 408 | DCA_PROVIDER_REMOVE, NULL); | 418 | DCA_PROVIDER_REMOVE, NULL); |
| 409 | 419 | ||
| 410 | spin_lock_irqsave(&dca_lock, flags); | 420 | raw_spin_lock_irqsave(&dca_lock, flags); |
| 411 | 421 | ||
| 412 | list_del(&dca->node); | 422 | list_del(&dca->node); |
| 413 | 423 | ||
| @@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev) | |||
| 416 | if (list_empty(&domain->dca_providers)) | 426 | if (list_empty(&domain->dca_providers)) |
| 417 | dca_free_domain(domain); | 427 | dca_free_domain(domain); |
| 418 | 428 | ||
| 419 | spin_unlock_irqrestore(&dca_lock, flags); | 429 | raw_spin_unlock_irqrestore(&dca_lock, flags); |
| 420 | 430 | ||
| 421 | dca_sysfs_remove_provider(dca); | 431 | dca_sysfs_remove_provider(dca); |
| 422 | } | 432 | } |
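
The dca-core change is the one non-mechanical conversion in this excerpt: dca_get_domain() may no longer allocate, because memory allocation can sleep and is therefore illegal under a raw spinlock. register_dca_provider() instead drops the lock, allocates, retakes the lock, and rechecks for a racing insertion, freeing the spare allocation if it lost. A self-contained sketch of that pattern, with invented names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static LIST_HEAD(demo_domains);

struct demo_domain {
	struct list_head node;
	int id;
};

static struct demo_domain *demo_find(int id)	/* caller holds demo_lock */
{
	struct demo_domain *d;

	list_for_each_entry(d, &demo_domains, node)
		if (d->id == id)
			return d;
	return NULL;
}

static struct demo_domain *demo_get_or_create(int id)
{
	struct demo_domain *d, *newdomain = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	d = demo_find(id);
	if (!d) {
		/* can't allocate under a raw lock: drop it first */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
		newdomain = kzalloc(sizeof(*newdomain), GFP_KERNEL);
		if (!newdomain)
			return NULL;
		newdomain->id = id;
		raw_spin_lock_irqsave(&demo_lock, flags);
		d = demo_find(id);	/* recheck: someone may have raced us */
		if (!d) {
			d = newdomain;
			newdomain = NULL;
			list_add(&d->node, &demo_domains);
		}
	}
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	kfree(newdomain);		/* no-op unless we lost the race */
	return d;
}
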
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index ab8a4eff072a..a71f55e72be9 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
| @@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS]; | |||
| 81 | /* Protects allocations from the above array of maps */ | 81 | /* Protects allocations from the above array of maps */ |
| 82 | static DEFINE_MUTEX(map_lock); | 82 | static DEFINE_MUTEX(map_lock); |
| 83 | /* Protects register accesses and individual mappings */ | 83 | /* Protects register accesses and individual mappings */ |
| 84 | static DEFINE_SPINLOCK(bank_lock); | 84 | static DEFINE_RAW_SPINLOCK(bank_lock); |
| 85 | 85 | ||
| 86 | static struct ipu_irq_map *src2map(unsigned int src) | 86 | static struct ipu_irq_map *src2map(unsigned int src) |
| 87 | { | 87 | { |
| @@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_data *d) | |||
| 101 | uint32_t reg; | 101 | uint32_t reg; |
| 102 | unsigned long lock_flags; | 102 | unsigned long lock_flags; |
| 103 | 103 | ||
| 104 | spin_lock_irqsave(&bank_lock, lock_flags); | 104 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 105 | 105 | ||
| 106 | bank = map->bank; | 106 | bank = map->bank; |
| 107 | if (!bank) { | 107 | if (!bank) { |
| 108 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 108 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
| 110 | return; | 110 | return; |
| 111 | } | 111 | } |
| @@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_data *d) | |||
| 114 | reg |= (1UL << (map->source & 31)); | 114 | reg |= (1UL << (map->source & 31)); |
| 115 | ipu_write_reg(bank->ipu, reg, bank->control); | 115 | ipu_write_reg(bank->ipu, reg, bank->control); |
| 116 | 116 | ||
| 117 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 117 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static void ipu_irq_mask(struct irq_data *d) | 120 | static void ipu_irq_mask(struct irq_data *d) |
| @@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data *d) | |||
| 124 | uint32_t reg; | 124 | uint32_t reg; |
| 125 | unsigned long lock_flags; | 125 | unsigned long lock_flags; |
| 126 | 126 | ||
| 127 | spin_lock_irqsave(&bank_lock, lock_flags); | 127 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 128 | 128 | ||
| 129 | bank = map->bank; | 129 | bank = map->bank; |
| 130 | if (!bank) { | 130 | if (!bank) { |
| 131 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 131 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
| 133 | return; | 133 | return; |
| 134 | } | 134 | } |
| @@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data *d) | |||
| 137 | reg &= ~(1UL << (map->source & 31)); | 137 | reg &= ~(1UL << (map->source & 31)); |
| 138 | ipu_write_reg(bank->ipu, reg, bank->control); | 138 | ipu_write_reg(bank->ipu, reg, bank->control); |
| 139 | 139 | ||
| 140 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 140 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static void ipu_irq_ack(struct irq_data *d) | 143 | static void ipu_irq_ack(struct irq_data *d) |
| @@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data *d) | |||
| 146 | struct ipu_irq_bank *bank; | 146 | struct ipu_irq_bank *bank; |
| 147 | unsigned long lock_flags; | 147 | unsigned long lock_flags; |
| 148 | 148 | ||
| 149 | spin_lock_irqsave(&bank_lock, lock_flags); | 149 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 150 | 150 | ||
| 151 | bank = map->bank; | 151 | bank = map->bank; |
| 152 | if (!bank) { | 152 | if (!bank) { |
| 153 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 153 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); | 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
| 155 | return; | 155 | return; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); | 158 | ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); |
| 159 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 159 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | /** | 162 | /** |
| @@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq) | |||
| 172 | unsigned long lock_flags; | 172 | unsigned long lock_flags; |
| 173 | bool ret; | 173 | bool ret; |
| 174 | 174 | ||
| 175 | spin_lock_irqsave(&bank_lock, lock_flags); | 175 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 176 | bank = map->bank; | 176 | bank = map->bank; |
| 177 | ret = bank && ipu_read_reg(bank->ipu, bank->status) & | 177 | ret = bank && ipu_read_reg(bank->ipu, bank->status) & |
| 178 | (1UL << (map->source & 31)); | 178 | (1UL << (map->source & 31)); |
| 179 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 179 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 180 | 180 | ||
| 181 | return ret; | 181 | return ret; |
| 182 | } | 182 | } |
| @@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source) | |||
| 213 | if (irq_map[i].source < 0) { | 213 | if (irq_map[i].source < 0) { |
| 214 | unsigned long lock_flags; | 214 | unsigned long lock_flags; |
| 215 | 215 | ||
| 216 | spin_lock_irqsave(&bank_lock, lock_flags); | 216 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 217 | irq_map[i].source = source; | 217 | irq_map[i].source = source; |
| 218 | irq_map[i].bank = irq_bank + source / 32; | 218 | irq_map[i].bank = irq_bank + source / 32; |
| 219 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 219 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 220 | 220 | ||
| 221 | ret = irq_map[i].irq; | 221 | ret = irq_map[i].irq; |
| 222 | pr_debug("IPU: mapped source %u to IRQ %u\n", | 222 | pr_debug("IPU: mapped source %u to IRQ %u\n", |
| @@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source) | |||
| 252 | pr_debug("IPU: unmapped source %u from IRQ %u\n", | 252 | pr_debug("IPU: unmapped source %u from IRQ %u\n", |
| 253 | source, irq_map[i].irq); | 253 | source, irq_map[i].irq); |
| 254 | 254 | ||
| 255 | spin_lock_irqsave(&bank_lock, lock_flags); | 255 | raw_spin_lock_irqsave(&bank_lock, lock_flags); |
| 256 | irq_map[i].source = -EINVAL; | 256 | irq_map[i].source = -EINVAL; |
| 257 | irq_map[i].bank = NULL; | 257 | irq_map[i].bank = NULL; |
| 258 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 258 | raw_spin_unlock_irqrestore(&bank_lock, lock_flags); |
| 259 | 259 | ||
| 260 | ret = 0; | 260 | ret = 0; |
| 261 | break; | 261 | break; |
| @@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
| 276 | for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { | 276 | for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { |
| 277 | struct ipu_irq_bank *bank = irq_bank + i; | 277 | struct ipu_irq_bank *bank = irq_bank + i; |
| 278 | 278 | ||
| 279 | spin_lock(&bank_lock); | 279 | raw_spin_lock(&bank_lock); |
| 280 | status = ipu_read_reg(ipu, bank->status); | 280 | status = ipu_read_reg(ipu, bank->status); |
| 281 | /* | 281 | /* |
| 282 | * Don't think we have to clear all interrupts here, they will | 282 | * Don't think we have to clear all interrupts here, they will |
| @@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
| 284 | * might want to clear unhandled interrupts after the loop... | 284 | * might want to clear unhandled interrupts after the loop... |
| 285 | */ | 285 | */ |
| 286 | status &= ipu_read_reg(ipu, bank->control); | 286 | status &= ipu_read_reg(ipu, bank->control); |
| 287 | spin_unlock(&bank_lock); | 287 | raw_spin_unlock(&bank_lock); |
| 288 | while ((line = ffs(status))) { | 288 | while ((line = ffs(status))) { |
| 289 | struct ipu_irq_map *map; | 289 | struct ipu_irq_map *map; |
| 290 | 290 | ||
| 291 | line--; | 291 | line--; |
| 292 | status &= ~(1UL << line); | 292 | status &= ~(1UL << line); |
| 293 | 293 | ||
| 294 | spin_lock(&bank_lock); | 294 | raw_spin_lock(&bank_lock); |
| 295 | map = src2map(32 * i + line); | 295 | map = src2map(32 * i + line); |
| 296 | if (map) | 296 | if (map) |
| 297 | irq = map->irq; | 297 | irq = map->irq; |
| 298 | spin_unlock(&bank_lock); | 298 | raw_spin_unlock(&bank_lock); |
| 299 | 299 | ||
| 300 | if (!map) { | 300 | if (!map) { |
| 301 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | 301 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", |
| @@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | |||
| 317 | for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { | 317 | for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { |
| 318 | struct ipu_irq_bank *bank = irq_bank + i; | 318 | struct ipu_irq_bank *bank = irq_bank + i; |
| 319 | 319 | ||
| 320 | spin_lock(&bank_lock); | 320 | raw_spin_lock(&bank_lock); |
| 321 | status = ipu_read_reg(ipu, bank->status); | 321 | status = ipu_read_reg(ipu, bank->status); |
| 322 | /* Not clearing all interrupts, see above */ | 322 | /* Not clearing all interrupts, see above */ |
| 323 | status &= ipu_read_reg(ipu, bank->control); | 323 | status &= ipu_read_reg(ipu, bank->control); |
| 324 | spin_unlock(&bank_lock); | 324 | raw_spin_unlock(&bank_lock); |
| 325 | while ((line = ffs(status))) { | 325 | while ((line = ffs(status))) { |
| 326 | struct ipu_irq_map *map; | 326 | struct ipu_irq_map *map; |
| 327 | 327 | ||
| 328 | line--; | 328 | line--; |
| 329 | status &= ~(1UL << line); | 329 | status &= ~(1UL << line); |
| 330 | 330 | ||
| 331 | spin_lock(&bank_lock); | 331 | raw_spin_lock(&bank_lock); |
| 332 | map = src2map(32 * i + line); | 332 | map = src2map(32 * i + line); |
| 333 | if (map) | 333 | if (map) |
| 334 | irq = map->irq; | 334 | irq = map->irq; |
| 335 | spin_unlock(&bank_lock); | 335 | raw_spin_unlock(&bank_lock); |
| 336 | 336 | ||
| 337 | if (!map) { | 337 | if (!map) { |
| 338 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | 338 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 587e8f2d38d8..35c1e17fce1d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
| @@ -652,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 652 | (unsigned long long)iommu->cap, | 652 | (unsigned long long)iommu->cap, |
| 653 | (unsigned long long)iommu->ecap); | 653 | (unsigned long long)iommu->ecap); |
| 654 | 654 | ||
| 655 | spin_lock_init(&iommu->register_lock); | 655 | raw_spin_lock_init(&iommu->register_lock); |
| 656 | 656 | ||
| 657 | drhd->iommu = iommu; | 657 | drhd->iommu = iommu; |
| 658 | return 0; | 658 | return 0; |
| @@ -771,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
| 771 | restart: | 771 | restart: |
| 772 | rc = 0; | 772 | rc = 0; |
| 773 | 773 | ||
| 774 | spin_lock_irqsave(&qi->q_lock, flags); | 774 | raw_spin_lock_irqsave(&qi->q_lock, flags); |
| 775 | while (qi->free_cnt < 3) { | 775 | while (qi->free_cnt < 3) { |
| 776 | spin_unlock_irqrestore(&qi->q_lock, flags); | 776 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); |
| 777 | cpu_relax(); | 777 | cpu_relax(); |
| 778 | spin_lock_irqsave(&qi->q_lock, flags); | 778 | raw_spin_lock_irqsave(&qi->q_lock, flags); |
| 779 | } | 779 | } |
| 780 | 780 | ||
| 781 | index = qi->free_head; | 781 | index = qi->free_head; |
| @@ -815,15 +815,15 @@ restart: | |||
| 815 | if (rc) | 815 | if (rc) |
| 816 | break; | 816 | break; |
| 817 | 817 | ||
| 818 | spin_unlock(&qi->q_lock); | 818 | raw_spin_unlock(&qi->q_lock); |
| 819 | cpu_relax(); | 819 | cpu_relax(); |
| 820 | spin_lock(&qi->q_lock); | 820 | raw_spin_lock(&qi->q_lock); |
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | qi->desc_status[index] = QI_DONE; | 823 | qi->desc_status[index] = QI_DONE; |
| 824 | 824 | ||
| 825 | reclaim_free_desc(qi); | 825 | reclaim_free_desc(qi); |
| 826 | spin_unlock_irqrestore(&qi->q_lock, flags); | 826 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); |
| 827 | 827 | ||
| 828 | if (rc == -EAGAIN) | 828 | if (rc == -EAGAIN) |
| 829 | goto restart; | 829 | goto restart; |
| @@ -912,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) | |||
| 912 | if (!ecap_qis(iommu->ecap)) | 912 | if (!ecap_qis(iommu->ecap)) |
| 913 | return; | 913 | return; |
| 914 | 914 | ||
| 915 | spin_lock_irqsave(&iommu->register_lock, flags); | 915 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 916 | 916 | ||
| 917 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); | 917 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); |
| 918 | if (!(sts & DMA_GSTS_QIES)) | 918 | if (!(sts & DMA_GSTS_QIES)) |
| @@ -932,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) | |||
| 932 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, | 932 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, |
| 933 | !(sts & DMA_GSTS_QIES), sts); | 933 | !(sts & DMA_GSTS_QIES), sts); |
| 934 | end: | 934 | end: |
| 935 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 935 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 936 | } | 936 | } |
| 937 | 937 | ||
| 938 | /* | 938 | /* |
| @@ -947,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) | |||
| 947 | qi->free_head = qi->free_tail = 0; | 947 | qi->free_head = qi->free_tail = 0; |
| 948 | qi->free_cnt = QI_LENGTH; | 948 | qi->free_cnt = QI_LENGTH; |
| 949 | 949 | ||
| 950 | spin_lock_irqsave(&iommu->register_lock, flags); | 950 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 951 | 951 | ||
| 952 | /* write zero to the tail reg */ | 952 | /* write zero to the tail reg */ |
| 953 | writel(0, iommu->reg + DMAR_IQT_REG); | 953 | writel(0, iommu->reg + DMAR_IQT_REG); |
| @@ -960,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) | |||
| 960 | /* Make sure hardware complete it */ | 960 | /* Make sure hardware complete it */ |
| 961 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); | 961 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); |
| 962 | 962 | ||
| 963 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 963 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 964 | } | 964 | } |
| 965 | 965 | ||
| 966 | /* | 966 | /* |
| @@ -1009,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu) | |||
| 1009 | qi->free_head = qi->free_tail = 0; | 1009 | qi->free_head = qi->free_tail = 0; |
| 1010 | qi->free_cnt = QI_LENGTH; | 1010 | qi->free_cnt = QI_LENGTH; |
| 1011 | 1011 | ||
| 1012 | spin_lock_init(&qi->q_lock); | 1012 | raw_spin_lock_init(&qi->q_lock); |
| 1013 | 1013 | ||
| 1014 | __dmar_enable_qi(iommu); | 1014 | __dmar_enable_qi(iommu); |
| 1015 | 1015 | ||
| @@ -1075,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data) | |||
| 1075 | unsigned long flag; | 1075 | unsigned long flag; |
| 1076 | 1076 | ||
| 1077 | /* unmask it */ | 1077 | /* unmask it */ |
| 1078 | spin_lock_irqsave(&iommu->register_lock, flag); | 1078 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1079 | writel(0, iommu->reg + DMAR_FECTL_REG); | 1079 | writel(0, iommu->reg + DMAR_FECTL_REG); |
| 1080 | /* Read a reg to force flush the post write */ | 1080 | /* Read a reg to force flush the post write */ |
| 1081 | readl(iommu->reg + DMAR_FECTL_REG); | 1081 | readl(iommu->reg + DMAR_FECTL_REG); |
| 1082 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1082 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1083 | } | 1083 | } |
| 1084 | 1084 | ||
| 1085 | void dmar_msi_mask(struct irq_data *data) | 1085 | void dmar_msi_mask(struct irq_data *data) |
| @@ -1088,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data) | |||
| 1088 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); | 1088 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
| 1089 | 1089 | ||
| 1090 | /* mask it */ | 1090 | /* mask it */ |
| 1091 | spin_lock_irqsave(&iommu->register_lock, flag); | 1091 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1092 | writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); | 1092 | writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); |
| 1093 | /* Read a reg to force flush the post write */ | 1093 | /* Read a reg to force flush the post write */ |
| 1094 | readl(iommu->reg + DMAR_FECTL_REG); | 1094 | readl(iommu->reg + DMAR_FECTL_REG); |
| 1095 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1095 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1096 | } | 1096 | } |
| 1097 | 1097 | ||
| 1098 | void dmar_msi_write(int irq, struct msi_msg *msg) | 1098 | void dmar_msi_write(int irq, struct msi_msg *msg) |
| @@ -1100,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg) | |||
| 1100 | struct intel_iommu *iommu = irq_get_handler_data(irq); | 1100 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
| 1101 | unsigned long flag; | 1101 | unsigned long flag; |
| 1102 | 1102 | ||
| 1103 | spin_lock_irqsave(&iommu->register_lock, flag); | 1103 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1104 | writel(msg->data, iommu->reg + DMAR_FEDATA_REG); | 1104 | writel(msg->data, iommu->reg + DMAR_FEDATA_REG); |
| 1105 | writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); | 1105 | writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); |
| 1106 | writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); | 1106 | writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); |
| 1107 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1107 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1108 | } | 1108 | } |
| 1109 | 1109 | ||
| 1110 | void dmar_msi_read(int irq, struct msi_msg *msg) | 1110 | void dmar_msi_read(int irq, struct msi_msg *msg) |
| @@ -1112,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg) | |||
| 1112 | struct intel_iommu *iommu = irq_get_handler_data(irq); | 1112 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
| 1113 | unsigned long flag; | 1113 | unsigned long flag; |
| 1114 | 1114 | ||
| 1115 | spin_lock_irqsave(&iommu->register_lock, flag); | 1115 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1116 | msg->data = readl(iommu->reg + DMAR_FEDATA_REG); | 1116 | msg->data = readl(iommu->reg + DMAR_FEDATA_REG); |
| 1117 | msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); | 1117 | msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); |
| 1118 | msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); | 1118 | msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); |
| 1119 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1119 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1120 | } | 1120 | } |
| 1121 | 1121 | ||
| 1122 | static int dmar_fault_do_one(struct intel_iommu *iommu, int type, | 1122 | static int dmar_fault_do_one(struct intel_iommu *iommu, int type, |
| @@ -1153,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) | |||
| 1153 | u32 fault_status; | 1153 | u32 fault_status; |
| 1154 | unsigned long flag; | 1154 | unsigned long flag; |
| 1155 | 1155 | ||
| 1156 | spin_lock_irqsave(&iommu->register_lock, flag); | 1156 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1157 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); | 1157 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
| 1158 | if (fault_status) | 1158 | if (fault_status) |
| 1159 | printk(KERN_ERR "DRHD: handling fault status reg %x\n", | 1159 | printk(KERN_ERR "DRHD: handling fault status reg %x\n", |
| @@ -1192,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id) | |||
| 1192 | writel(DMA_FRCD_F, iommu->reg + reg + | 1192 | writel(DMA_FRCD_F, iommu->reg + reg + |
| 1193 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | 1193 | fault_index * PRIMARY_FAULT_REG_LEN + 12); |
| 1194 | 1194 | ||
| 1195 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1195 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1196 | 1196 | ||
| 1197 | dmar_fault_do_one(iommu, type, fault_reason, | 1197 | dmar_fault_do_one(iommu, type, fault_reason, |
| 1198 | source_id, guest_addr); | 1198 | source_id, guest_addr); |
| @@ -1200,14 +1200,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id) | |||
| 1200 | fault_index++; | 1200 | fault_index++; |
| 1201 | if (fault_index >= cap_num_fault_regs(iommu->cap)) | 1201 | if (fault_index >= cap_num_fault_regs(iommu->cap)) |
| 1202 | fault_index = 0; | 1202 | fault_index = 0; |
| 1203 | spin_lock_irqsave(&iommu->register_lock, flag); | 1203 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1204 | } | 1204 | } |
| 1205 | clear_rest: | 1205 | clear_rest: |
| 1206 | /* clear all the other faults */ | 1206 | /* clear all the other faults */ |
| 1207 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); | 1207 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
| 1208 | writel(fault_status, iommu->reg + DMAR_FSTS_REG); | 1208 | writel(fault_status, iommu->reg + DMAR_FSTS_REG); |
| 1209 | 1209 | ||
| 1210 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1210 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1211 | return IRQ_HANDLED; | 1211 | return IRQ_HANDLED; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
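
qi_submit_sync() keeps its existing discipline of never busy-waiting with the lock held: it drops the queue lock around cpu_relax() and retakes it, which matters even more for a raw lock since waiters spin instead of sleeping. Sketched with a placeholder predicate:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/processor.h>

static DEFINE_RAW_SPINLOCK(demo_q_lock);

static bool demo_have_room(void)
{
	return true;	/* placeholder for qi->free_cnt >= 3 */
}

static void demo_reserve_slot(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_q_lock, flags);
	while (!demo_have_room()) {
		/* drop the lock so other CPUs can free slots */
		raw_spin_unlock_irqrestore(&demo_q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&demo_q_lock, flags);
	}
	/* ... fill and submit the descriptor here ... */
	raw_spin_unlock_irqrestore(&demo_q_lock, flags);
}
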
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f28d933c7927..be1953c239b0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -939,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) | |||
| 939 | 939 | ||
| 940 | addr = iommu->root_entry; | 940 | addr = iommu->root_entry; |
| 941 | 941 | ||
| 942 | spin_lock_irqsave(&iommu->register_lock, flag); | 942 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 943 | dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); | 943 | dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); |
| 944 | 944 | ||
| 945 | writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); | 945 | writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); |
| @@ -948,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) | |||
| 948 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 948 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 949 | readl, (sts & DMA_GSTS_RTPS), sts); | 949 | readl, (sts & DMA_GSTS_RTPS), sts); |
| 950 | 950 | ||
| 951 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 951 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 952 | } | 952 | } |
| 953 | 953 | ||
| 954 | static void iommu_flush_write_buffer(struct intel_iommu *iommu) | 954 | static void iommu_flush_write_buffer(struct intel_iommu *iommu) |
| @@ -959,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu) | |||
| 959 | if (!rwbf_quirk && !cap_rwbf(iommu->cap)) | 959 | if (!rwbf_quirk && !cap_rwbf(iommu->cap)) |
| 960 | return; | 960 | return; |
| 961 | 961 | ||
| 962 | spin_lock_irqsave(&iommu->register_lock, flag); | 962 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 963 | writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); | 963 | writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); |
| 964 | 964 | ||
| 965 | /* Make sure hardware complete it */ | 965 | /* Make sure hardware complete it */ |
| 966 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 966 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 967 | readl, (!(val & DMA_GSTS_WBFS)), val); | 967 | readl, (!(val & DMA_GSTS_WBFS)), val); |
| 968 | 968 | ||
| 969 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 969 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 970 | } | 970 | } |
| 971 | 971 | ||
| 972 | /* return value determine if we need a write buffer flush */ | 972 | /* return value determine if we need a write buffer flush */ |
| @@ -993,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu, | |||
| 993 | } | 993 | } |
| 994 | val |= DMA_CCMD_ICC; | 994 | val |= DMA_CCMD_ICC; |
| 995 | 995 | ||
| 996 | spin_lock_irqsave(&iommu->register_lock, flag); | 996 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 997 | dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); | 997 | dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); |
| 998 | 998 | ||
| 999 | /* Make sure hardware complete it */ | 999 | /* Make sure hardware complete it */ |
| 1000 | IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, | 1000 | IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, |
| 1001 | dmar_readq, (!(val & DMA_CCMD_ICC)), val); | 1001 | dmar_readq, (!(val & DMA_CCMD_ICC)), val); |
| 1002 | 1002 | ||
| 1003 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1003 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| 1006 | /* return value determine if we need a write buffer flush */ | 1006 | /* return value determine if we need a write buffer flush */ |
| @@ -1039,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
| 1039 | if (cap_write_drain(iommu->cap)) | 1039 | if (cap_write_drain(iommu->cap)) |
| 1040 | val |= DMA_TLB_WRITE_DRAIN; | 1040 | val |= DMA_TLB_WRITE_DRAIN; |
| 1041 | 1041 | ||
| 1042 | spin_lock_irqsave(&iommu->register_lock, flag); | 1042 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1043 | /* Note: Only uses first TLB reg currently */ | 1043 | /* Note: Only uses first TLB reg currently */ |
| 1044 | if (val_iva) | 1044 | if (val_iva) |
| 1045 | dmar_writeq(iommu->reg + tlb_offset, val_iva); | 1045 | dmar_writeq(iommu->reg + tlb_offset, val_iva); |
| @@ -1049,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
| 1049 | IOMMU_WAIT_OP(iommu, tlb_offset + 8, | 1049 | IOMMU_WAIT_OP(iommu, tlb_offset + 8, |
| 1050 | dmar_readq, (!(val & DMA_TLB_IVT)), val); | 1050 | dmar_readq, (!(val & DMA_TLB_IVT)), val); |
| 1051 | 1051 | ||
| 1052 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1052 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1053 | 1053 | ||
| 1054 | /* check IOTLB invalidation granularity */ | 1054 | /* check IOTLB invalidation granularity */ |
| 1055 | if (DMA_TLB_IAIG(val) == 0) | 1055 | if (DMA_TLB_IAIG(val) == 0) |
| @@ -1165,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | |||
| 1165 | u32 pmen; | 1165 | u32 pmen; |
| 1166 | unsigned long flags; | 1166 | unsigned long flags; |
| 1167 | 1167 | ||
| 1168 | spin_lock_irqsave(&iommu->register_lock, flags); | 1168 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 1169 | pmen = readl(iommu->reg + DMAR_PMEN_REG); | 1169 | pmen = readl(iommu->reg + DMAR_PMEN_REG); |
| 1170 | pmen &= ~DMA_PMEN_EPM; | 1170 | pmen &= ~DMA_PMEN_EPM; |
| 1171 | writel(pmen, iommu->reg + DMAR_PMEN_REG); | 1171 | writel(pmen, iommu->reg + DMAR_PMEN_REG); |
| @@ -1174,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | |||
| 1174 | IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, | 1174 | IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, |
| 1175 | readl, !(pmen & DMA_PMEN_PRS), pmen); | 1175 | readl, !(pmen & DMA_PMEN_PRS), pmen); |
| 1176 | 1176 | ||
| 1177 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 1177 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1178 | } | 1178 | } |
| 1179 | 1179 | ||
| 1180 | static int iommu_enable_translation(struct intel_iommu *iommu) | 1180 | static int iommu_enable_translation(struct intel_iommu *iommu) |
| @@ -1182,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu) | |||
| 1182 | u32 sts; | 1182 | u32 sts; |
| 1183 | unsigned long flags; | 1183 | unsigned long flags; |
| 1184 | 1184 | ||
| 1185 | spin_lock_irqsave(&iommu->register_lock, flags); | 1185 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 1186 | iommu->gcmd |= DMA_GCMD_TE; | 1186 | iommu->gcmd |= DMA_GCMD_TE; |
| 1187 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); | 1187 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 1188 | 1188 | ||
| @@ -1190,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu) | |||
| 1190 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 1190 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 1191 | readl, (sts & DMA_GSTS_TES), sts); | 1191 | readl, (sts & DMA_GSTS_TES), sts); |
| 1192 | 1192 | ||
| 1193 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 1193 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1194 | return 0; | 1194 | return 0; |
| 1195 | } | 1195 | } |
| 1196 | 1196 | ||
| @@ -1199,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu) | |||
| 1199 | u32 sts; | 1199 | u32 sts; |
| 1200 | unsigned long flag; | 1200 | unsigned long flag; |
| 1201 | 1201 | ||
| 1202 | spin_lock_irqsave(&iommu->register_lock, flag); | 1202 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 1203 | iommu->gcmd &= ~DMA_GCMD_TE; | 1203 | iommu->gcmd &= ~DMA_GCMD_TE; |
| 1204 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); | 1204 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 1205 | 1205 | ||
| @@ -1207,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu) | |||
| 1207 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 1207 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 1208 | readl, (!(sts & DMA_GSTS_TES)), sts); | 1208 | readl, (!(sts & DMA_GSTS_TES)), sts); |
| 1209 | 1209 | ||
| 1210 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 1210 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1211 | return 0; | 1211 | return 0; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| @@ -3329,7 +3329,7 @@ static int iommu_suspend(void) | |||
| 3329 | for_each_active_iommu(iommu, drhd) { | 3329 | for_each_active_iommu(iommu, drhd) { |
| 3330 | iommu_disable_translation(iommu); | 3330 | iommu_disable_translation(iommu); |
| 3331 | 3331 | ||
| 3332 | spin_lock_irqsave(&iommu->register_lock, flag); | 3332 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 3333 | 3333 | ||
| 3334 | iommu->iommu_state[SR_DMAR_FECTL_REG] = | 3334 | iommu->iommu_state[SR_DMAR_FECTL_REG] = |
| 3335 | readl(iommu->reg + DMAR_FECTL_REG); | 3335 | readl(iommu->reg + DMAR_FECTL_REG); |
| @@ -3340,7 +3340,7 @@ static int iommu_suspend(void) | |||
| 3340 | iommu->iommu_state[SR_DMAR_FEUADDR_REG] = | 3340 | iommu->iommu_state[SR_DMAR_FEUADDR_REG] = |
| 3341 | readl(iommu->reg + DMAR_FEUADDR_REG); | 3341 | readl(iommu->reg + DMAR_FEUADDR_REG); |
| 3342 | 3342 | ||
| 3343 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 3343 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3344 | } | 3344 | } |
| 3345 | return 0; | 3345 | return 0; |
| 3346 | 3346 | ||
| @@ -3367,7 +3367,7 @@ static void iommu_resume(void) | |||
| 3367 | 3367 | ||
| 3368 | for_each_active_iommu(iommu, drhd) { | 3368 | for_each_active_iommu(iommu, drhd) { |
| 3369 | 3369 | ||
| 3370 | spin_lock_irqsave(&iommu->register_lock, flag); | 3370 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
| 3371 | 3371 | ||
| 3372 | writel(iommu->iommu_state[SR_DMAR_FECTL_REG], | 3372 | writel(iommu->iommu_state[SR_DMAR_FECTL_REG], |
| 3373 | iommu->reg + DMAR_FECTL_REG); | 3373 | iommu->reg + DMAR_FECTL_REG); |
| @@ -3378,7 +3378,7 @@ static void iommu_resume(void) | |||
| 3378 | writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], | 3378 | writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], |
| 3379 | iommu->reg + DMAR_FEUADDR_REG); | 3379 | iommu->reg + DMAR_FEUADDR_REG); |
| 3380 | 3380 | ||
| 3381 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 3381 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3382 | } | 3382 | } |
| 3383 | 3383 | ||
| 3384 | for_each_active_iommu(iommu, drhd) | 3384 | for_each_active_iommu(iommu, drhd) |
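
register_lock covers a write-command-then-poll-status handshake (writel() followed by IOMMU_WAIT_OP()), all with interrupts off, so the lock must never sleep and the poll must stay short. Illustrative shape, with invented register offsets and without the timeout the real macro has:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/processor.h>

static DEFINE_RAW_SPINLOCK(demo_reg_lock);

static void demo_enable_feature(void __iomem *reg, u32 bit)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&demo_reg_lock, flags);
	writel(readl(reg + 0x18) | bit, reg + 0x18);	/* command reg */
	do {
		sts = readl(reg + 0x1c);		/* status reg */
		cpu_relax();
	} while (!(sts & bit));
	raw_spin_unlock_irqrestore(&demo_reg_lock, flags);
}
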
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c index cfb0dd4bf0b6..07c9f189f314 100644 --- a/drivers/iommu/intr_remapping.c +++ b/drivers/iommu/intr_remapping.c | |||
| @@ -54,7 +54,7 @@ static __init int setup_intremap(char *str) | |||
| 54 | } | 54 | } |
| 55 | early_param("intremap", setup_intremap); | 55 | early_param("intremap", setup_intremap); |
| 56 | 56 | ||
| 57 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 57 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); |
| 58 | 58 | ||
| 59 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 59 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
| 60 | { | 60 | { |
| @@ -71,12 +71,12 @@ int get_irte(int irq, struct irte *entry) | |||
| 71 | if (!entry || !irq_iommu) | 71 | if (!entry || !irq_iommu) |
| 72 | return -1; | 72 | return -1; |
| 73 | 73 | ||
| 74 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 74 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 75 | 75 | ||
| 76 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 76 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 77 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 77 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
| 78 | 78 | ||
| 79 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 79 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 80 | return 0; | 80 | return 0; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| @@ -110,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
| 110 | return -1; | 110 | return -1; |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 113 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 114 | do { | 114 | do { |
| 115 | for (i = index; i < index + count; i++) | 115 | for (i = index; i < index + count; i++) |
| 116 | if (table->base[i].present) | 116 | if (table->base[i].present) |
| @@ -122,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
| 122 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; | 122 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; |
| 123 | 123 | ||
| 124 | if (index == start_index) { | 124 | if (index == start_index) { |
| 125 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 125 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 126 | printk(KERN_ERR "can't allocate an IRTE\n"); | 126 | printk(KERN_ERR "can't allocate an IRTE\n"); |
| 127 | return -1; | 127 | return -1; |
| 128 | } | 128 | } |
| @@ -136,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
| 136 | irq_iommu->sub_handle = 0; | 136 | irq_iommu->sub_handle = 0; |
| 137 | irq_iommu->irte_mask = mask; | 137 | irq_iommu->irte_mask = mask; |
| 138 | 138 | ||
| 139 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 139 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 140 | 140 | ||
| 141 | return index; | 141 | return index; |
| 142 | } | 142 | } |
| @@ -161,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
| 161 | if (!irq_iommu) | 161 | if (!irq_iommu) |
| 162 | return -1; | 162 | return -1; |
| 163 | 163 | ||
| 164 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 164 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 165 | *sub_handle = irq_iommu->sub_handle; | 165 | *sub_handle = irq_iommu->sub_handle; |
| 166 | index = irq_iommu->irte_index; | 166 | index = irq_iommu->irte_index; |
| 167 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 167 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 168 | return index; | 168 | return index; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| @@ -176,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
| 176 | if (!irq_iommu) | 176 | if (!irq_iommu) |
| 177 | return -1; | 177 | return -1; |
| 178 | 178 | ||
| 179 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 179 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 180 | 180 | ||
| 181 | irq_iommu->iommu = iommu; | 181 | irq_iommu->iommu = iommu; |
| 182 | irq_iommu->irte_index = index; | 182 | irq_iommu->irte_index = index; |
| 183 | irq_iommu->sub_handle = subhandle; | 183 | irq_iommu->sub_handle = subhandle; |
| 184 | irq_iommu->irte_mask = 0; | 184 | irq_iommu->irte_mask = 0; |
| 185 | 185 | ||
| 186 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 186 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 187 | 187 | ||
| 188 | return 0; | 188 | return 0; |
| 189 | } | 189 | } |
| @@ -199,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
| 199 | if (!irq_iommu) | 199 | if (!irq_iommu) |
| 200 | return -1; | 200 | return -1; |
| 201 | 201 | ||
| 202 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 202 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 203 | 203 | ||
| 204 | iommu = irq_iommu->iommu; | 204 | iommu = irq_iommu->iommu; |
| 205 | 205 | ||
| @@ -211,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
| 211 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 211 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
| 212 | 212 | ||
| 213 | rc = qi_flush_iec(iommu, index, 0); | 213 | rc = qi_flush_iec(iommu, index, 0); |
| 214 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 214 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 215 | 215 | ||
| 216 | return rc; | 216 | return rc; |
| 217 | } | 217 | } |
| @@ -279,7 +279,7 @@ int free_irte(int irq) | |||
| 279 | if (!irq_iommu) | 279 | if (!irq_iommu) |
| 280 | return -1; | 280 | return -1; |
| 281 | 281 | ||
| 282 | spin_lock_irqsave(&irq_2_ir_lock, flags); | 282 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
| 283 | 283 | ||
| 284 | rc = clear_entries(irq_iommu); | 284 | rc = clear_entries(irq_iommu); |
| 285 | 285 | ||
| @@ -288,7 +288,7 @@ int free_irte(int irq) | |||
| 288 | irq_iommu->sub_handle = 0; | 288 | irq_iommu->sub_handle = 0; |
| 289 | irq_iommu->irte_mask = 0; | 289 | irq_iommu->irte_mask = 0; |
| 290 | 290 | ||
| 291 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); | 291 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 292 | 292 | ||
| 293 | return rc; | 293 | return rc; |
| 294 | } | 294 | } |
| @@ -418,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | |||
| 418 | 418 | ||
| 419 | addr = virt_to_phys((void *)iommu->ir_table->base); | 419 | addr = virt_to_phys((void *)iommu->ir_table->base); |
| 420 | 420 | ||
| 421 | spin_lock_irqsave(&iommu->register_lock, flags); | 421 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 422 | 422 | ||
| 423 | dmar_writeq(iommu->reg + DMAR_IRTA_REG, | 423 | dmar_writeq(iommu->reg + DMAR_IRTA_REG, |
| 424 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); | 424 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); |
| @@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | |||
| 429 | 429 | ||
| 430 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 430 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 431 | readl, (sts & DMA_GSTS_IRTPS), sts); | 431 | readl, (sts & DMA_GSTS_IRTPS), sts); |
| 432 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 432 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 433 | 433 | ||
| 434 | /* | 434 | /* |
| 435 | * global invalidation of interrupt entry cache before enabling | 435 | * global invalidation of interrupt entry cache before enabling |
| @@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | |||
| 437 | */ | 437 | */ |
| 438 | qi_global_iec(iommu); | 438 | qi_global_iec(iommu); |
| 439 | 439 | ||
| 440 | spin_lock_irqsave(&iommu->register_lock, flags); | 440 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 441 | 441 | ||
| 442 | /* Enable interrupt-remapping */ | 442 | /* Enable interrupt-remapping */ |
| 443 | iommu->gcmd |= DMA_GCMD_IRE; | 443 | iommu->gcmd |= DMA_GCMD_IRE; |
| @@ -446,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | |||
| 446 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | 446 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 447 | readl, (sts & DMA_GSTS_IRES), sts); | 447 | readl, (sts & DMA_GSTS_IRES), sts); |
| 448 | 448 | ||
| 449 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 449 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 450 | } | 450 | } |
| 451 | 451 | ||
| 452 | 452 | ||
| @@ -494,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) | |||
| 494 | */ | 494 | */ |
| 495 | qi_global_iec(iommu); | 495 | qi_global_iec(iommu); |
| 496 | 496 | ||
| 497 | spin_lock_irqsave(&iommu->register_lock, flags); | 497 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
| 498 | 498 | ||
| 499 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); | 499 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); |
| 500 | if (!(sts & DMA_GSTS_IRES)) | 500 | if (!(sts & DMA_GSTS_IRES)) |
| @@ -507,7 +507,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) | |||
| 507 | readl, !(sts & DMA_GSTS_IRES), sts); | 507 | readl, !(sts & DMA_GSTS_IRES), sts); |
| 508 | 508 | ||
| 509 | end: | 509 | end: |
| 510 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 510 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | static int __init dmar_x2apic_optout(void) | 513 | static int __init dmar_x2apic_optout(void) |
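
irq_2_ir_lock guards the irq-to-IRTE bookkeeping, and every accessor converts one-for-one. A representative accessor, reduced to a sketch over a made-up mapping table:

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_NR_IRQS 16

static DEFINE_RAW_SPINLOCK(demo_map_lock);
static u16 demo_irte_index[DEMO_NR_IRQS];	/* irq -> table slot */

/* Short read-only critical section, callable with IRQs already off. */
static int demo_irq_to_index(unsigned int irq, u16 *index)
{
	unsigned long flags;

	if (irq >= DEMO_NR_IRQS)
		return -1;
	raw_spin_lock_irqsave(&demo_map_lock, flags);
	*index = demo_irte_index[irq];
	raw_spin_unlock_irqrestore(&demo_map_lock, flags);
	return 0;
}
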
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index dd87e86048be..c0cc4e7ff023 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c | |||
| @@ -82,10 +82,10 @@ int alloc_event_buffer(void) | |||
| 82 | { | 82 | { |
| 83 | unsigned long flags; | 83 | unsigned long flags; |
| 84 | 84 | ||
| 85 | spin_lock_irqsave(&oprofilefs_lock, flags); | 85 | raw_spin_lock_irqsave(&oprofilefs_lock, flags); |
| 86 | buffer_size = oprofile_buffer_size; | 86 | buffer_size = oprofile_buffer_size; |
| 87 | buffer_watershed = oprofile_buffer_watershed; | 87 | buffer_watershed = oprofile_buffer_watershed; |
| 88 | spin_unlock_irqrestore(&oprofilefs_lock, flags); | 88 | raw_spin_unlock_irqrestore(&oprofilefs_lock, flags); |
| 89 | 89 | ||
| 90 | if (buffer_watershed >= buffer_size) | 90 | if (buffer_watershed >= buffer_size) |
| 91 | return -EINVAL; | 91 | return -EINVAL; |
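
alloc_event_buffer() only needs the lock long enough to snapshot two related tunables as a consistent pair before validating them; roughly:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_fs_lock);
static unsigned long demo_size, demo_watershed;	/* user tunables */

static int demo_read_tunables(unsigned long *size, unsigned long *watershed)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_fs_lock, flags);
	*size = demo_size;		/* both reads under one lock hold */
	*watershed = demo_watershed;
	raw_spin_unlock_irqrestore(&demo_fs_lock, flags);

	return *watershed >= *size ? -EINVAL : 0;
}
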
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c index 94796f39bc47..da14432806c6 100644 --- a/drivers/oprofile/oprofile_perf.c +++ b/drivers/oprofile/oprofile_perf.c | |||
| @@ -160,9 +160,9 @@ static int oprofile_perf_create_files(struct super_block *sb, struct dentry *roo | |||
| 160 | 160 | ||
| 161 | static int oprofile_perf_setup(void) | 161 | static int oprofile_perf_setup(void) |
| 162 | { | 162 | { |
| 163 | spin_lock(&oprofilefs_lock); | 163 | raw_spin_lock(&oprofilefs_lock); |
| 164 | op_perf_setup(); | 164 | op_perf_setup(); |
| 165 | spin_unlock(&oprofilefs_lock); | 165 | raw_spin_unlock(&oprofilefs_lock); |
| 166 | return 0; | 166 | return 0; |
| 167 | } | 167 | } |
| 168 | 168 | ||
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index e9ff6f7770be..d0de6cc2d7a5 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | #define OPROFILEFS_MAGIC 0x6f70726f | 22 | #define OPROFILEFS_MAGIC 0x6f70726f |
| 23 | 23 | ||
| 24 | DEFINE_SPINLOCK(oprofilefs_lock); | 24 | DEFINE_RAW_SPINLOCK(oprofilefs_lock); |
| 25 | 25 | ||
| 26 | static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) | 26 | static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) |
| 27 | { | 27 | { |
| @@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_ | |||
| 76 | if (copy_from_user(tmpbuf, buf, count)) | 76 | if (copy_from_user(tmpbuf, buf, count)) |
| 77 | return -EFAULT; | 77 | return -EFAULT; |
| 78 | 78 | ||
| 79 | spin_lock_irqsave(&oprofilefs_lock, flags); | 79 | raw_spin_lock_irqsave(&oprofilefs_lock, flags); |
| 80 | *val = simple_strtoul(tmpbuf, NULL, 0); | 80 | *val = simple_strtoul(tmpbuf, NULL, 0); |
| 81 | spin_unlock_irqrestore(&oprofilefs_lock, flags); | 81 | raw_spin_unlock_irqrestore(&oprofilefs_lock, flags); |
| 82 | return 0; | 82 | return 0; |
| 83 | } | 83 | } |
| 84 | 84 | ||
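Note that the oprofilefs_lock conversion has to change three places in lockstep: the DEFINE_RAW_SPINLOCK() definition here, the extern declaration in include/linux/oprofile.h (further down), and every lock/unlock site, since the spin_lock() family will not type-check against a raw_spinlock_t. A sketch of the pattern, with made-up names:

	#include <linux/spinlock.h>

	DEFINE_RAW_SPINLOCK(sketch_fs_lock);	/* definition, in one .c file */
	/* in a shared header: extern raw_spinlock_t sketch_fs_lock; */

	static unsigned long sketch_snapshot(unsigned long *src)
	{
		unsigned long flags, val;

		raw_spin_lock_irqsave(&sketch_fs_lock, flags);
		val = *src;		/* short, bounded critical section */
		raw_spin_unlock_irqrestore(&sketch_fs_lock, flags);
		return val;
	}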
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 915fd74da7a2..d449a74d4a31 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | #include <video/vga.h> | 50 | #include <video/vga.h> |
| 51 | #include <asm/io.h> | 51 | #include <asm/io.h> |
| 52 | 52 | ||
| 53 | static DEFINE_SPINLOCK(vga_lock); | 53 | static DEFINE_RAW_SPINLOCK(vga_lock); |
| 54 | static int cursor_size_lastfrom; | 54 | static int cursor_size_lastfrom; |
| 55 | static int cursor_size_lastto; | 55 | static int cursor_size_lastto; |
| 56 | static u32 vgacon_xres; | 56 | static u32 vgacon_xres; |
| @@ -157,7 +157,7 @@ static inline void write_vga(unsigned char reg, unsigned int val) | |||
| 157 | * ddprintk might set the console position from interrupt | 157 | * ddprintk might set the console position from interrupt |
| 158 | * handlers, thus the write has to be IRQ-atomic. | 158 | * handlers, thus the write has to be IRQ-atomic. |
| 159 | */ | 159 | */ |
| 160 | spin_lock_irqsave(&vga_lock, flags); | 160 | raw_spin_lock_irqsave(&vga_lock, flags); |
| 161 | 161 | ||
| 162 | #ifndef SLOW_VGA | 162 | #ifndef SLOW_VGA |
| 163 | v1 = reg + (val & 0xff00); | 163 | v1 = reg + (val & 0xff00); |
| @@ -170,7 +170,7 @@ static inline void write_vga(unsigned char reg, unsigned int val) | |||
| 170 | outb_p(reg + 1, vga_video_port_reg); | 170 | outb_p(reg + 1, vga_video_port_reg); |
| 171 | outb_p(val & 0xff, vga_video_port_val); | 171 | outb_p(val & 0xff, vga_video_port_val); |
| 172 | #endif | 172 | #endif |
| 173 | spin_unlock_irqrestore(&vga_lock, flags); | 173 | raw_spin_unlock_irqrestore(&vga_lock, flags); |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | static inline void vga_set_mem_top(struct vc_data *c) | 176 | static inline void vga_set_mem_top(struct vc_data *c) |
| @@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to) | |||
| 664 | cursor_size_lastfrom = from; | 664 | cursor_size_lastfrom = from; |
| 665 | cursor_size_lastto = to; | 665 | cursor_size_lastto = to; |
| 666 | 666 | ||
| 667 | spin_lock_irqsave(&vga_lock, flags); | 667 | raw_spin_lock_irqsave(&vga_lock, flags); |
| 668 | if (vga_video_type >= VIDEO_TYPE_VGAC) { | 668 | if (vga_video_type >= VIDEO_TYPE_VGAC) { |
| 669 | outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); | 669 | outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); |
| 670 | curs = inb_p(vga_video_port_val); | 670 | curs = inb_p(vga_video_port_val); |
| @@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to) | |||
| 682 | outb_p(curs, vga_video_port_val); | 682 | outb_p(curs, vga_video_port_val); |
| 683 | outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); | 683 | outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); |
| 684 | outb_p(cure, vga_video_port_val); | 684 | outb_p(cure, vga_video_port_val); |
| 685 | spin_unlock_irqrestore(&vga_lock, flags); | 685 | raw_spin_unlock_irqrestore(&vga_lock, flags); |
| 686 | } | 686 | } |
| 687 | 687 | ||
| 688 | static void vgacon_cursor(struct vc_data *c, int mode) | 688 | static void vgacon_cursor(struct vc_data *c, int mode) |
| @@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_data *c, | |||
| 757 | unsigned int scanlines = height * c->vc_font.height; | 757 | unsigned int scanlines = height * c->vc_font.height; |
| 758 | u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; | 758 | u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; |
| 759 | 759 | ||
| 760 | spin_lock_irqsave(&vga_lock, flags); | 760 | raw_spin_lock_irqsave(&vga_lock, flags); |
| 761 | 761 | ||
| 762 | vgacon_xres = width * VGA_FONTWIDTH; | 762 | vgacon_xres = width * VGA_FONTWIDTH; |
| 763 | vgacon_yres = height * c->vc_font.height; | 763 | vgacon_yres = height * c->vc_font.height; |
| @@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_data *c, | |||
| 808 | outb_p(vsync_end, vga_video_port_val); | 808 | outb_p(vsync_end, vga_video_port_val); |
| 809 | } | 809 | } |
| 810 | 810 | ||
| 811 | spin_unlock_irqrestore(&vga_lock, flags); | 811 | raw_spin_unlock_irqrestore(&vga_lock, flags); |
| 812 | return 0; | 812 | return 0; |
| 813 | } | 813 | } |
| 814 | 814 | ||
| @@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgastate *state, int mode) | |||
| 891 | { | 891 | { |
| 892 | /* save original values of VGA controller registers */ | 892 | /* save original values of VGA controller registers */ |
| 893 | if (!vga_vesa_blanked) { | 893 | if (!vga_vesa_blanked) { |
| 894 | spin_lock_irq(&vga_lock); | 894 | raw_spin_lock_irq(&vga_lock); |
| 895 | vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); | 895 | vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); |
| 896 | vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); | 896 | vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); |
| 897 | vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); | 897 | vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); |
| 898 | spin_unlock_irq(&vga_lock); | 898 | raw_spin_unlock_irq(&vga_lock); |
| 899 | 899 | ||
| 900 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ | 900 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ |
| 901 | vga_state.HorizontalTotal = inb_p(vga_video_port_val); | 901 | vga_state.HorizontalTotal = inb_p(vga_video_port_val); |
| @@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgastate *state, int mode) | |||
| 918 | 918 | ||
| 919 | /* assure that video is enabled */ | 919 | /* assure that video is enabled */ |
| 920 | /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ | 920 | /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ |
| 921 | spin_lock_irq(&vga_lock); | 921 | raw_spin_lock_irq(&vga_lock); |
| 922 | vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); | 922 | vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); |
| 923 | 923 | ||
| 924 | /* test for vertical retrace in process.... */ | 924 | /* test for vertical retrace in process.... */ |
| @@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgastate *state, int mode) | |||
| 954 | /* restore both index registers */ | 954 | /* restore both index registers */ |
| 955 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); | 955 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); |
| 956 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); | 956 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); |
| 957 | spin_unlock_irq(&vga_lock); | 957 | raw_spin_unlock_irq(&vga_lock); |
| 958 | } | 958 | } |
| 959 | 959 | ||
| 960 | static void vga_vesa_unblank(struct vgastate *state) | 960 | static void vga_vesa_unblank(struct vgastate *state) |
| 961 | { | 961 | { |
| 962 | /* restore original values of VGA controller registers */ | 962 | /* restore original values of VGA controller registers */ |
| 963 | spin_lock_irq(&vga_lock); | 963 | raw_spin_lock_irq(&vga_lock); |
| 964 | vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); | 964 | vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); |
| 965 | 965 | ||
| 966 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ | 966 | outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ |
| @@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgastate *state) | |||
| 985 | /* restore index/control registers */ | 985 | /* restore index/control registers */ |
| 986 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); | 986 | vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); |
| 987 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); | 987 | outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); |
| 988 | spin_unlock_irq(&vga_lock); | 988 | raw_spin_unlock_irq(&vga_lock); |
| 989 | } | 989 | } |
| 990 | 990 | ||
| 991 | static void vga_pal_blank(struct vgastate *state) | 991 | static void vga_pal_blank(struct vgastate *state) |
| @@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
| 1104 | charmap += 4 * cmapsz; | 1104 | charmap += 4 * cmapsz; |
| 1105 | #endif | 1105 | #endif |
| 1106 | 1106 | ||
| 1107 | spin_lock_irq(&vga_lock); | 1107 | raw_spin_lock_irq(&vga_lock); |
| 1108 | /* First, the Sequencer */ | 1108 | /* First, the Sequencer */ |
| 1109 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); | 1109 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); |
| 1110 | /* CPU writes only to map 2 */ | 1110 | /* CPU writes only to map 2 */ |
| @@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
| 1120 | vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); | 1120 | vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); |
| 1121 | /* map start at A000:0000 */ | 1121 | /* map start at A000:0000 */ |
| 1122 | vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); | 1122 | vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); |
| 1123 | spin_unlock_irq(&vga_lock); | 1123 | raw_spin_unlock_irq(&vga_lock); |
| 1124 | 1124 | ||
| 1125 | if (arg) { | 1125 | if (arg) { |
| 1126 | if (set) | 1126 | if (set) |
| @@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
| 1147 | } | 1147 | } |
| 1148 | } | 1148 | } |
| 1149 | 1149 | ||
| 1150 | spin_lock_irq(&vga_lock); | 1150 | raw_spin_lock_irq(&vga_lock); |
| 1151 | /* First, the sequencer, Synchronous reset */ | 1151 | /* First, the sequencer, Synchronous reset */ |
| 1152 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); | 1152 | vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); |
| 1153 | /* CPU writes to maps 0 and 1 */ | 1153 | /* CPU writes to maps 0 and 1 */ |
| @@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) | |||
| 1186 | inb_p(video_port_status); | 1186 | inb_p(video_port_status); |
| 1187 | vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); | 1187 | vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); |
| 1188 | } | 1188 | } |
| 1189 | spin_unlock_irq(&vga_lock); | 1189 | raw_spin_unlock_irq(&vga_lock); |
| 1190 | return 0; | 1190 | return 0; |
| 1191 | } | 1191 | } |
| 1192 | 1192 | ||
| @@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) | |||
| 1211 | registers; they are write-only on EGA, but it appears that they | 1211 | registers; they are write-only on EGA, but it appears that they |
| 1212 | are all don't care bits on EGA, so I guess it doesn't matter. */ | 1212 | are all don't care bits on EGA, so I guess it doesn't matter. */ |
| 1213 | 1213 | ||
| 1214 | spin_lock_irq(&vga_lock); | 1214 | raw_spin_lock_irq(&vga_lock); |
| 1215 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ | 1215 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ |
| 1216 | ovr = inb_p(vga_video_port_val); | 1216 | ovr = inb_p(vga_video_port_val); |
| 1217 | outb_p(0x09, vga_video_port_reg); /* Font size register */ | 1217 | outb_p(0x09, vga_video_port_reg); /* Font size register */ |
| 1218 | fsr = inb_p(vga_video_port_val); | 1218 | fsr = inb_p(vga_video_port_val); |
| 1219 | spin_unlock_irq(&vga_lock); | 1219 | raw_spin_unlock_irq(&vga_lock); |
| 1220 | 1220 | ||
| 1221 | vde = maxscan & 0xff; /* Vertical display end reg */ | 1221 | vde = maxscan & 0xff; /* Vertical display end reg */ |
| 1222 | ovr = (ovr & 0xbd) + /* Overflow register */ | 1222 | ovr = (ovr & 0xbd) + /* Overflow register */ |
| 1223 | ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); | 1223 | ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); |
| 1224 | fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ | 1224 | fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ |
| 1225 | 1225 | ||
| 1226 | spin_lock_irq(&vga_lock); | 1226 | raw_spin_lock_irq(&vga_lock); |
| 1227 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ | 1227 | outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ |
| 1228 | outb_p(ovr, vga_video_port_val); | 1228 | outb_p(ovr, vga_video_port_val); |
| 1229 | outb_p(0x09, vga_video_port_reg); /* Font size */ | 1229 | outb_p(0x09, vga_video_port_reg); /* Font size */ |
| 1230 | outb_p(fsr, vga_video_port_val); | 1230 | outb_p(fsr, vga_video_port_val); |
| 1231 | outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ | 1231 | outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ |
| 1232 | outb_p(vde, vga_video_port_val); | 1232 | outb_p(vde, vga_video_port_val); |
| 1233 | spin_unlock_irq(&vga_lock); | 1233 | raw_spin_unlock_irq(&vga_lock); |
| 1234 | vga_video_font_height = fontheight; | 1234 | vga_video_font_height = fontheight; |
| 1235 | 1235 | ||
| 1236 | for (i = 0; i < MAX_NR_CONSOLES; i++) { | 1236 | for (i = 0; i < MAX_NR_CONSOLES; i++) { |
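vgacon mixes both IRQ-disabling variants, and the conversion preserves the distinction: raw_spin_lock_irq() may only be used where interrupts are known to be enabled, because the matching unlock re-enables them unconditionally, while raw_spin_lock_irqsave() records and restores the previous IRQ state and is safe from any context. Schematically (illustrative code, not the console driver):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(sketch_lock);

	static void called_with_irqs_on(void)
	{
		raw_spin_lock_irq(&sketch_lock);  /* unlock re-enables IRQs */
		/* ... program I/O ports ... */
		raw_spin_unlock_irq(&sketch_lock);
	}

	static void called_from_any_context(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&sketch_lock, flags);
		/* ... */
		raw_spin_unlock_irqrestore(&sketch_lock, flags);
	}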
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d14e058aaeed..08ffab01e76c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -42,7 +42,7 @@ extern struct fs_struct init_fs; | |||
| 42 | .cputimer = { \ | 42 | .cputimer = { \ |
| 43 | .cputime = INIT_CPUTIME, \ | 43 | .cputime = INIT_CPUTIME, \ |
| 44 | .running = 0, \ | 44 | .running = 0, \ |
| 45 | .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ | 45 | .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ |
| 46 | }, \ | 46 | }, \ |
| 47 | .cred_guard_mutex = \ | 47 | .cred_guard_mutex = \ |
| 48 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 48 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 235b8879af45..e6ca56de9936 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -271,7 +271,7 @@ struct qi_desc { | |||
| 271 | }; | 271 | }; |
| 272 | 272 | ||
| 273 | struct q_inval { | 273 | struct q_inval { |
| 274 | spinlock_t q_lock; | 274 | raw_spinlock_t q_lock; |
| 275 | struct qi_desc *desc; /* invalidation queue */ | 275 | struct qi_desc *desc; /* invalidation queue */ |
| 276 | int *desc_status; /* desc status */ | 276 | int *desc_status; /* desc status */ |
| 277 | int free_head; /* first free entry */ | 277 | int free_head; /* first free entry */ |
| @@ -311,7 +311,7 @@ struct intel_iommu { | |||
| 311 | u64 cap; | 311 | u64 cap; |
| 312 | u64 ecap; | 312 | u64 ecap; |
| 313 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ | 313 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ |
| 314 | spinlock_t register_lock; /* protect register handling */ | 314 | raw_spinlock_t register_lock; /* protect register handling */ |
| 315 | int seq_id; /* sequence id of the iommu */ | 315 | int seq_id; /* sequence id of the iommu */ |
| 316 | int agaw; /* agaw of this iommu */ | 316 | int agaw; /* agaw of this iommu */ |
| 317 | int msagaw; /* max sagaw of this iommu */ | 317 | int msagaw; /* max sagaw of this iommu */ |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index dd7c12e875bc..dce6e4dbeda7 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
| @@ -181,7 +181,7 @@ struct kretprobe { | |||
| 181 | int nmissed; | 181 | int nmissed; |
| 182 | size_t data_size; | 182 | size_t data_size; |
| 183 | struct hlist_head free_instances; | 183 | struct hlist_head free_instances; |
| 184 | spinlock_t lock; | 184 | raw_spinlock_t lock; |
| 185 | }; | 185 | }; |
| 186 | 186 | ||
| 187 | struct kretprobe_instance { | 187 | struct kretprobe_instance { |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 49c8727eeb57..a4c562453f6b 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
| @@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co | |||
| 166 | int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); | 166 | int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); |
| 167 | 167 | ||
| 168 | /** lock for read/write safety */ | 168 | /** lock for read/write safety */ |
| 169 | extern spinlock_t oprofilefs_lock; | 169 | extern raw_spinlock_t oprofilefs_lock; |
| 170 | 170 | ||
| 171 | /** | 171 | /** |
| 172 | * Add the contents of a circular buffer to the event buffer. | 172 | * Add the contents of a circular buffer to the event buffer. |
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 5edc9014263a..b9df9ed1adc0 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #ifdef CONFIG_SMP | 16 | #ifdef CONFIG_SMP |
| 17 | 17 | ||
| 18 | struct percpu_counter { | 18 | struct percpu_counter { |
| 19 | spinlock_t lock; | 19 | raw_spinlock_t lock; |
| 20 | s64 count; | 20 | s64 count; |
| 21 | #ifdef CONFIG_HOTPLUG_CPU | 21 | #ifdef CONFIG_HOTPLUG_CPU |
| 22 | struct list_head list; /* All percpu_counters are on a list */ | 22 | struct list_head list; /* All percpu_counters are on a list */ |
diff --git a/include/linux/proportions.h b/include/linux/proportions.h index cf793bbbd05e..ef35bb73f69b 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h | |||
| @@ -58,7 +58,7 @@ struct prop_local_percpu { | |||
| 58 | */ | 58 | */ |
| 59 | int shift; | 59 | int shift; |
| 60 | unsigned long period; | 60 | unsigned long period; |
| 61 | spinlock_t lock; /* protect the snapshot state */ | 61 | raw_spinlock_t lock; /* protect the snapshot state */ |
| 62 | }; | 62 | }; |
| 63 | 63 | ||
| 64 | int prop_local_init_percpu(struct prop_local_percpu *pl); | 64 | int prop_local_init_percpu(struct prop_local_percpu *pl); |
| @@ -106,11 +106,11 @@ struct prop_local_single { | |||
| 106 | */ | 106 | */ |
| 107 | unsigned long period; | 107 | unsigned long period; |
| 108 | int shift; | 108 | int shift; |
| 109 | spinlock_t lock; /* protect the snapshot state */ | 109 | raw_spinlock_t lock; /* protect the snapshot state */ |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | #define INIT_PROP_LOCAL_SINGLE(name) \ | 112 | #define INIT_PROP_LOCAL_SINGLE(name) \ |
| 113 | { .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ | 113 | { .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | int prop_local_init_single(struct prop_local_single *pl); | 116 | int prop_local_init_single(struct prop_local_single *pl); |
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 2f007157fab9..e11ccb4cf48d 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #define DEFAULT_RATELIMIT_BURST 10 | 8 | #define DEFAULT_RATELIMIT_BURST 10 |
| 9 | 9 | ||
| 10 | struct ratelimit_state { | 10 | struct ratelimit_state { |
| 11 | spinlock_t lock; /* protect the state */ | 11 | raw_spinlock_t lock; /* protect the state */ |
| 12 | 12 | ||
| 13 | int interval; | 13 | int interval; |
| 14 | int burst; | 14 | int burst; |
| @@ -20,7 +20,7 @@ struct ratelimit_state { | |||
| 20 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ | 20 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ |
| 21 | \ | 21 | \ |
| 22 | struct ratelimit_state name = { \ | 22 | struct ratelimit_state name = { \ |
| 23 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ | 23 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
| 24 | .interval = interval_init, \ | 24 | .interval = interval_init, \ |
| 25 | .burst = burst_init, \ | 25 | .burst = burst_init, \ |
| 26 | } | 26 | } |
| @@ -28,7 +28,7 @@ struct ratelimit_state { | |||
| 28 | static inline void ratelimit_state_init(struct ratelimit_state *rs, | 28 | static inline void ratelimit_state_init(struct ratelimit_state *rs, |
| 29 | int interval, int burst) | 29 | int interval, int burst) |
| 30 | { | 30 | { |
| 31 | spin_lock_init(&rs->lock); | 31 | raw_spin_lock_init(&rs->lock); |
| 32 | rs->interval = interval; | 32 | rs->interval = interval; |
| 33 | rs->burst = burst; | 33 | rs->burst = burst; |
| 34 | rs->printed = 0; | 34 | rs->printed = 0; |
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index 34701241b673..d5b13bc07a0b 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | */ | 22 | */ |
| 23 | struct rw_semaphore { | 23 | struct rw_semaphore { |
| 24 | __s32 activity; | 24 | __s32 activity; |
| 25 | spinlock_t wait_lock; | 25 | raw_spinlock_t wait_lock; |
| 26 | struct list_head wait_list; | 26 | struct list_head wait_list; |
| 27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 28 | struct lockdep_map dep_map; | 28 | struct lockdep_map dep_map; |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 6a6741440cb7..63d406554391 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
| @@ -25,7 +25,7 @@ struct rw_semaphore; | |||
| 25 | /* All arch specific implementations share the same struct */ | 25 | /* All arch specific implementations share the same struct */ |
| 26 | struct rw_semaphore { | 26 | struct rw_semaphore { |
| 27 | long count; | 27 | long count; |
| 28 | spinlock_t wait_lock; | 28 | raw_spinlock_t wait_lock; |
| 29 | struct list_head wait_list; | 29 | struct list_head wait_list; |
| 30 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 30 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 31 | struct lockdep_map dep_map; | 31 | struct lockdep_map dep_map; |
| @@ -56,9 +56,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) | |||
| 56 | # define __RWSEM_DEP_MAP_INIT(lockname) | 56 | # define __RWSEM_DEP_MAP_INIT(lockname) |
| 57 | #endif | 57 | #endif |
| 58 | 58 | ||
| 59 | #define __RWSEM_INITIALIZER(name) \ | 59 | #define __RWSEM_INITIALIZER(name) \ |
| 60 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \ | 60 | { RWSEM_UNLOCKED_VALUE, \ |
| 61 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | 61 | __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ |
| 62 | LIST_HEAD_INIT((name).wait_list) \ | ||
| 63 | __RWSEM_DEP_MAP_INIT(name) } | ||
| 62 | 64 | ||
| 63 | #define DECLARE_RWSEM(name) \ | 65 | #define DECLARE_RWSEM(name) \ |
| 64 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 66 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 600eb0a6f89a..1be699dd32a5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -510,7 +510,7 @@ struct task_cputime { | |||
| 510 | struct thread_group_cputimer { | 510 | struct thread_group_cputimer { |
| 511 | struct task_cputime cputime; | 511 | struct task_cputime cputime; |
| 512 | int running; | 512 | int running; |
| 513 | spinlock_t lock; | 513 | raw_spinlock_t lock; |
| 514 | }; | 514 | }; |
| 515 | 515 | ||
| 516 | #include <linux/rwsem.h> | 516 | #include <linux/rwsem.h> |
| @@ -2566,7 +2566,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); | |||
| 2566 | 2566 | ||
| 2567 | static inline void thread_group_cputime_init(struct signal_struct *sig) | 2567 | static inline void thread_group_cputime_init(struct signal_struct *sig) |
| 2568 | { | 2568 | { |
| 2569 | spin_lock_init(&sig->cputimer.lock); | 2569 | raw_spin_lock_init(&sig->cputimer.lock); |
| 2570 | } | 2570 | } |
| 2571 | 2571 | ||
| 2572 | /* | 2572 | /* |
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 39fa04966aa8..dc368b8ce215 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h | |||
| @@ -14,14 +14,14 @@ | |||
| 14 | 14 | ||
| 15 | /* Please don't access any members of this structure directly */ | 15 | /* Please don't access any members of this structure directly */ |
| 16 | struct semaphore { | 16 | struct semaphore { |
| 17 | spinlock_t lock; | 17 | raw_spinlock_t lock; |
| 18 | unsigned int count; | 18 | unsigned int count; |
| 19 | struct list_head wait_list; | 19 | struct list_head wait_list; |
| 20 | }; | 20 | }; |
| 21 | 21 | ||
| 22 | #define __SEMAPHORE_INITIALIZER(name, n) \ | 22 | #define __SEMAPHORE_INITIALIZER(name, n) \ |
| 23 | { \ | 23 | { \ |
| 24 | .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ | 24 | .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ |
| 25 | .count = n, \ | 25 | .count = n, \ |
| 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ |
| 27 | } | 27 | } |
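The header changes above cover both initialization idioms, and the raw API mirrors the plain one for each: __RAW_SPIN_LOCK_UNLOCKED() for compile-time initializers (as in the INIT_PROP_LOCAL_SINGLE, DEFINE_RATELIMIT_STATE, __RWSEM_INITIALIZER and __SEMAPHORE_INITIALIZER macros), and raw_spin_lock_init() for objects set up at run time (as in ratelimit_state_init() and thread_group_cputime_init()). A sketch with an invented structure:

	#include <linux/spinlock.h>

	struct sketch_state {
		raw_spinlock_t lock;
		int value;
	};

	/* compile time: the argument names the lock class for lockdep */
	static struct sketch_state sketch_static = {
		.lock	= __RAW_SPIN_LOCK_UNLOCKED(sketch_static.lock),
	};

	/* run time: for dynamically allocated or embedded objects */
	static void sketch_state_init(struct sketch_state *s)
	{
		raw_spin_lock_init(&s->lock);
		s->value = 0;
	}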
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1d2b6ceea95d..453100a4159d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list) | |||
| 265 | /* the list of cgroups eligible for automatic release. Protected by | 265 | /* the list of cgroups eligible for automatic release. Protected by |
| 266 | * release_list_lock */ | 266 | * release_list_lock */ |
| 267 | static LIST_HEAD(release_list); | 267 | static LIST_HEAD(release_list); |
| 268 | static DEFINE_SPINLOCK(release_list_lock); | 268 | static DEFINE_RAW_SPINLOCK(release_list_lock); |
| 269 | static void cgroup_release_agent(struct work_struct *work); | 269 | static void cgroup_release_agent(struct work_struct *work); |
| 270 | static DECLARE_WORK(release_agent_work, cgroup_release_agent); | 270 | static DECLARE_WORK(release_agent_work, cgroup_release_agent); |
| 271 | static void check_for_release(struct cgroup *cgrp); | 271 | static void check_for_release(struct cgroup *cgrp); |
| @@ -4014,11 +4014,11 @@ again: | |||
| 4014 | finish_wait(&cgroup_rmdir_waitq, &wait); | 4014 | finish_wait(&cgroup_rmdir_waitq, &wait); |
| 4015 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | 4015 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); |
| 4016 | 4016 | ||
| 4017 | spin_lock(&release_list_lock); | 4017 | raw_spin_lock(&release_list_lock); |
| 4018 | set_bit(CGRP_REMOVED, &cgrp->flags); | 4018 | set_bit(CGRP_REMOVED, &cgrp->flags); |
| 4019 | if (!list_empty(&cgrp->release_list)) | 4019 | if (!list_empty(&cgrp->release_list)) |
| 4020 | list_del_init(&cgrp->release_list); | 4020 | list_del_init(&cgrp->release_list); |
| 4021 | spin_unlock(&release_list_lock); | 4021 | raw_spin_unlock(&release_list_lock); |
| 4022 | 4022 | ||
| 4023 | cgroup_lock_hierarchy(cgrp->root); | 4023 | cgroup_lock_hierarchy(cgrp->root); |
| 4024 | /* delete this cgroup from parent->children */ | 4024 | /* delete this cgroup from parent->children */ |
| @@ -4671,13 +4671,13 @@ static void check_for_release(struct cgroup *cgrp) | |||
| 4671 | * already queued for a userspace notification, queue | 4671 | * already queued for a userspace notification, queue |
| 4672 | * it now */ | 4672 | * it now */ |
| 4673 | int need_schedule_work = 0; | 4673 | int need_schedule_work = 0; |
| 4674 | spin_lock(&release_list_lock); | 4674 | raw_spin_lock(&release_list_lock); |
| 4675 | if (!cgroup_is_removed(cgrp) && | 4675 | if (!cgroup_is_removed(cgrp) && |
| 4676 | list_empty(&cgrp->release_list)) { | 4676 | list_empty(&cgrp->release_list)) { |
| 4677 | list_add(&cgrp->release_list, &release_list); | 4677 | list_add(&cgrp->release_list, &release_list); |
| 4678 | need_schedule_work = 1; | 4678 | need_schedule_work = 1; |
| 4679 | } | 4679 | } |
| 4680 | spin_unlock(&release_list_lock); | 4680 | raw_spin_unlock(&release_list_lock); |
| 4681 | if (need_schedule_work) | 4681 | if (need_schedule_work) |
| 4682 | schedule_work(&release_agent_work); | 4682 | schedule_work(&release_agent_work); |
| 4683 | } | 4683 | } |
| @@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 4729 | { | 4729 | { |
| 4730 | BUG_ON(work != &release_agent_work); | 4730 | BUG_ON(work != &release_agent_work); |
| 4731 | mutex_lock(&cgroup_mutex); | 4731 | mutex_lock(&cgroup_mutex); |
| 4732 | spin_lock(&release_list_lock); | 4732 | raw_spin_lock(&release_list_lock); |
| 4733 | while (!list_empty(&release_list)) { | 4733 | while (!list_empty(&release_list)) { |
| 4734 | char *argv[3], *envp[3]; | 4734 | char *argv[3], *envp[3]; |
| 4735 | int i; | 4735 | int i; |
| @@ -4738,7 +4738,7 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 4738 | struct cgroup, | 4738 | struct cgroup, |
| 4739 | release_list); | 4739 | release_list); |
| 4740 | list_del_init(&cgrp->release_list); | 4740 | list_del_init(&cgrp->release_list); |
| 4741 | spin_unlock(&release_list_lock); | 4741 | raw_spin_unlock(&release_list_lock); |
| 4742 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 4742 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| 4743 | if (!pathbuf) | 4743 | if (!pathbuf) |
| 4744 | goto continue_free; | 4744 | goto continue_free; |
| @@ -4768,9 +4768,9 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 4768 | continue_free: | 4768 | continue_free: |
| 4769 | kfree(pathbuf); | 4769 | kfree(pathbuf); |
| 4770 | kfree(agentbuf); | 4770 | kfree(agentbuf); |
| 4771 | spin_lock(&release_list_lock); | 4771 | raw_spin_lock(&release_list_lock); |
| 4772 | } | 4772 | } |
| 4773 | spin_unlock(&release_list_lock); | 4773 | raw_spin_unlock(&release_list_lock); |
| 4774 | mutex_unlock(&cgroup_mutex); | 4774 | mutex_unlock(&cgroup_mutex); |
| 4775 | } | 4775 | } |
| 4776 | 4776 | ||
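Worth noting in the cgroup hunks: release_list_lock is only ever held across list manipulation. cgroup_release_agent() drops it before each iteration's allocation and usermode-helper call and retakes it afterwards, and check_for_release() merely queues the cgroup under the lock and calls schedule_work() outside it. That discipline is what makes a raw lock viable here, since sleeping while holding one is a hard bug. The drain loop, reduced to a sketch with invented names:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	static DEFINE_RAW_SPINLOCK(sketch_list_lock);
	static LIST_HEAD(sketch_list);

	struct sketch_item {
		struct list_head link;
	};

	static void sketch_drain(void)
	{
		raw_spin_lock(&sketch_list_lock);
		while (!list_empty(&sketch_list)) {
			struct sketch_item *it =
				list_first_entry(&sketch_list,
						 struct sketch_item, link);

			list_del_init(&it->link);
			/* drop the raw lock around anything that may sleep */
			raw_spin_unlock(&sketch_list_lock);

			kfree(it);	/* stands in for the real, sleeping work */

			raw_spin_lock(&sketch_list_lock);
		}
		raw_spin_unlock(&sketch_list_lock);
	}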
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b30fd54eb985..2f193d0ba7f2 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -78,10 +78,10 @@ static bool kprobes_all_disarmed; | |||
| 78 | static DEFINE_MUTEX(kprobe_mutex); | 78 | static DEFINE_MUTEX(kprobe_mutex); |
| 79 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; | 79 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
| 80 | static struct { | 80 | static struct { |
| 81 | spinlock_t lock ____cacheline_aligned_in_smp; | 81 | raw_spinlock_t lock ____cacheline_aligned_in_smp; |
| 82 | } kretprobe_table_locks[KPROBE_TABLE_SIZE]; | 82 | } kretprobe_table_locks[KPROBE_TABLE_SIZE]; |
| 83 | 83 | ||
| 84 | static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) | 84 | static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) |
| 85 | { | 85 | { |
| 86 | return &(kretprobe_table_locks[hash].lock); | 86 | return &(kretprobe_table_locks[hash].lock); |
| 87 | } | 87 | } |
| @@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, | |||
| 1013 | hlist_del(&ri->hlist); | 1013 | hlist_del(&ri->hlist); |
| 1014 | INIT_HLIST_NODE(&ri->hlist); | 1014 | INIT_HLIST_NODE(&ri->hlist); |
| 1015 | if (likely(rp)) { | 1015 | if (likely(rp)) { |
| 1016 | spin_lock(&rp->lock); | 1016 | raw_spin_lock(&rp->lock); |
| 1017 | hlist_add_head(&ri->hlist, &rp->free_instances); | 1017 | hlist_add_head(&ri->hlist, &rp->free_instances); |
| 1018 | spin_unlock(&rp->lock); | 1018 | raw_spin_unlock(&rp->lock); |
| 1019 | } else | 1019 | } else |
| 1020 | /* Unregistering */ | 1020 | /* Unregistering */ |
| 1021 | hlist_add_head(&ri->hlist, head); | 1021 | hlist_add_head(&ri->hlist, head); |
| @@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk, | |||
| 1026 | __acquires(hlist_lock) | 1026 | __acquires(hlist_lock) |
| 1027 | { | 1027 | { |
| 1028 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 1028 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| 1029 | spinlock_t *hlist_lock; | 1029 | raw_spinlock_t *hlist_lock; |
| 1030 | 1030 | ||
| 1031 | *head = &kretprobe_inst_table[hash]; | 1031 | *head = &kretprobe_inst_table[hash]; |
| 1032 | hlist_lock = kretprobe_table_lock_ptr(hash); | 1032 | hlist_lock = kretprobe_table_lock_ptr(hash); |
| 1033 | spin_lock_irqsave(hlist_lock, *flags); | 1033 | raw_spin_lock_irqsave(hlist_lock, *flags); |
| 1034 | } | 1034 | } |
| 1035 | 1035 | ||
| 1036 | static void __kprobes kretprobe_table_lock(unsigned long hash, | 1036 | static void __kprobes kretprobe_table_lock(unsigned long hash, |
| 1037 | unsigned long *flags) | 1037 | unsigned long *flags) |
| 1038 | __acquires(hlist_lock) | 1038 | __acquires(hlist_lock) |
| 1039 | { | 1039 | { |
| 1040 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 1040 | raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 1041 | spin_lock_irqsave(hlist_lock, *flags); | 1041 | raw_spin_lock_irqsave(hlist_lock, *flags); |
| 1042 | } | 1042 | } |
| 1043 | 1043 | ||
| 1044 | void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, | 1044 | void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, |
| @@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, | |||
| 1046 | __releases(hlist_lock) | 1046 | __releases(hlist_lock) |
| 1047 | { | 1047 | { |
| 1048 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 1048 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| 1049 | spinlock_t *hlist_lock; | 1049 | raw_spinlock_t *hlist_lock; |
| 1050 | 1050 | ||
| 1051 | hlist_lock = kretprobe_table_lock_ptr(hash); | 1051 | hlist_lock = kretprobe_table_lock_ptr(hash); |
| 1052 | spin_unlock_irqrestore(hlist_lock, *flags); | 1052 | raw_spin_unlock_irqrestore(hlist_lock, *flags); |
| 1053 | } | 1053 | } |
| 1054 | 1054 | ||
| 1055 | static void __kprobes kretprobe_table_unlock(unsigned long hash, | 1055 | static void __kprobes kretprobe_table_unlock(unsigned long hash, |
| 1056 | unsigned long *flags) | 1056 | unsigned long *flags) |
| 1057 | __releases(hlist_lock) | 1057 | __releases(hlist_lock) |
| 1058 | { | 1058 | { |
| 1059 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 1059 | raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 1060 | spin_unlock_irqrestore(hlist_lock, *flags); | 1060 | raw_spin_unlock_irqrestore(hlist_lock, *flags); |
| 1061 | } | 1061 | } |
| 1062 | 1062 | ||
| 1063 | /* | 1063 | /* |
| @@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
| 1663 | 1663 | ||
| 1664 | /*TODO: consider to only swap the RA after the last pre_handler fired */ | 1664 | /*TODO: consider to only swap the RA after the last pre_handler fired */ |
| 1665 | hash = hash_ptr(current, KPROBE_HASH_BITS); | 1665 | hash = hash_ptr(current, KPROBE_HASH_BITS); |
| 1666 | spin_lock_irqsave(&rp->lock, flags); | 1666 | raw_spin_lock_irqsave(&rp->lock, flags); |
| 1667 | if (!hlist_empty(&rp->free_instances)) { | 1667 | if (!hlist_empty(&rp->free_instances)) { |
| 1668 | ri = hlist_entry(rp->free_instances.first, | 1668 | ri = hlist_entry(rp->free_instances.first, |
| 1669 | struct kretprobe_instance, hlist); | 1669 | struct kretprobe_instance, hlist); |
| 1670 | hlist_del(&ri->hlist); | 1670 | hlist_del(&ri->hlist); |
| 1671 | spin_unlock_irqrestore(&rp->lock, flags); | 1671 | raw_spin_unlock_irqrestore(&rp->lock, flags); |
| 1672 | 1672 | ||
| 1673 | ri->rp = rp; | 1673 | ri->rp = rp; |
| 1674 | ri->task = current; | 1674 | ri->task = current; |
| @@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
| 1685 | kretprobe_table_unlock(hash, &flags); | 1685 | kretprobe_table_unlock(hash, &flags); |
| 1686 | } else { | 1686 | } else { |
| 1687 | rp->nmissed++; | 1687 | rp->nmissed++; |
| 1688 | spin_unlock_irqrestore(&rp->lock, flags); | 1688 | raw_spin_unlock_irqrestore(&rp->lock, flags); |
| 1689 | } | 1689 | } |
| 1690 | return 0; | 1690 | return 0; |
| 1691 | } | 1691 | } |
| @@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp) | |||
| 1721 | rp->maxactive = num_possible_cpus(); | 1721 | rp->maxactive = num_possible_cpus(); |
| 1722 | #endif | 1722 | #endif |
| 1723 | } | 1723 | } |
| 1724 | spin_lock_init(&rp->lock); | 1724 | raw_spin_lock_init(&rp->lock); |
| 1725 | INIT_HLIST_HEAD(&rp->free_instances); | 1725 | INIT_HLIST_HEAD(&rp->free_instances); |
| 1726 | for (i = 0; i < rp->maxactive; i++) { | 1726 | for (i = 0; i < rp->maxactive; i++) { |
| 1727 | inst = kmalloc(sizeof(struct kretprobe_instance) + | 1727 | inst = kmalloc(sizeof(struct kretprobe_instance) + |
| @@ -1959,7 +1959,7 @@ static int __init init_kprobes(void) | |||
| 1959 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1959 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
| 1960 | INIT_HLIST_HEAD(&kprobe_table[i]); | 1960 | INIT_HLIST_HEAD(&kprobe_table[i]); |
| 1961 | INIT_HLIST_HEAD(&kretprobe_inst_table[i]); | 1961 | INIT_HLIST_HEAD(&kretprobe_inst_table[i]); |
| 1962 | spin_lock_init(&(kretprobe_table_locks[i].lock)); | 1962 | raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); |
| 1963 | } | 1963 | } |
| 1964 | 1964 | ||
| 1965 | /* | 1965 | /* |
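The kretprobe conversion keeps the hashed-lock-table layout: one cacheline-aligned raw lock per bucket, selected by hashing a pointer, so unrelated tasks do not contend on a single lock. The shape of it, with invented names:

	#include <linux/spinlock.h>
	#include <linux/hash.h>
	#include <linux/cache.h>
	#include <linux/init.h>

	#define SKETCH_HASH_BITS	6
	#define SKETCH_TABLE_SIZE	(1 << SKETCH_HASH_BITS)

	static struct {
		raw_spinlock_t lock ____cacheline_aligned_in_smp;
	} sketch_table_locks[SKETCH_TABLE_SIZE];

	static raw_spinlock_t *sketch_lock_ptr(void *key)
	{
		return &sketch_table_locks[hash_ptr(key, SKETCH_HASH_BITS)].lock;
	}

	static void sketch_hash_lock(void *key, unsigned long *flags)
	{
		raw_spin_lock_irqsave(sketch_lock_ptr(key), *flags);
	}

	static int __init sketch_locks_init(void)
	{
		int i;

		for (i = 0; i < SKETCH_TABLE_SIZE; i++)
			raw_spin_lock_init(&sketch_table_locks[i].lock);
		return 0;
	}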
diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 376066e10413..4ac8ebfcab59 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c | |||
| @@ -58,7 +58,7 @@ | |||
| 58 | #include <linux/list.h> | 58 | #include <linux/list.h> |
| 59 | #include <linux/stacktrace.h> | 59 | #include <linux/stacktrace.h> |
| 60 | 60 | ||
| 61 | static DEFINE_SPINLOCK(latency_lock); | 61 | static DEFINE_RAW_SPINLOCK(latency_lock); |
| 62 | 62 | ||
| 63 | #define MAXLR 128 | 63 | #define MAXLR 128 |
| 64 | static struct latency_record latency_record[MAXLR]; | 64 | static struct latency_record latency_record[MAXLR]; |
| @@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct task_struct *p) | |||
| 72 | if (!latencytop_enabled) | 72 | if (!latencytop_enabled) |
| 73 | return; | 73 | return; |
| 74 | 74 | ||
| 75 | spin_lock_irqsave(&latency_lock, flags); | 75 | raw_spin_lock_irqsave(&latency_lock, flags); |
| 76 | memset(&p->latency_record, 0, sizeof(p->latency_record)); | 76 | memset(&p->latency_record, 0, sizeof(p->latency_record)); |
| 77 | p->latency_record_count = 0; | 77 | p->latency_record_count = 0; |
| 78 | spin_unlock_irqrestore(&latency_lock, flags); | 78 | raw_spin_unlock_irqrestore(&latency_lock, flags); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static void clear_global_latency_tracing(void) | 81 | static void clear_global_latency_tracing(void) |
| 82 | { | 82 | { |
| 83 | unsigned long flags; | 83 | unsigned long flags; |
| 84 | 84 | ||
| 85 | spin_lock_irqsave(&latency_lock, flags); | 85 | raw_spin_lock_irqsave(&latency_lock, flags); |
| 86 | memset(&latency_record, 0, sizeof(latency_record)); | 86 | memset(&latency_record, 0, sizeof(latency_record)); |
| 87 | spin_unlock_irqrestore(&latency_lock, flags); | 87 | raw_spin_unlock_irqrestore(&latency_lock, flags); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static void __sched | 90 | static void __sched |
| @@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | |||
| 190 | lat.max = usecs; | 190 | lat.max = usecs; |
| 191 | store_stacktrace(tsk, &lat); | 191 | store_stacktrace(tsk, &lat); |
| 192 | 192 | ||
| 193 | spin_lock_irqsave(&latency_lock, flags); | 193 | raw_spin_lock_irqsave(&latency_lock, flags); |
| 194 | 194 | ||
| 195 | account_global_scheduler_latency(tsk, &lat); | 195 | account_global_scheduler_latency(tsk, &lat); |
| 196 | 196 | ||
| @@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | |||
| 231 | memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); | 231 | memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); |
| 232 | 232 | ||
| 233 | out_unlock: | 233 | out_unlock: |
| 234 | spin_unlock_irqrestore(&latency_lock, flags); | 234 | raw_spin_unlock_irqrestore(&latency_lock, flags); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | static int lstats_show(struct seq_file *m, void *v) | 237 | static int lstats_show(struct seq_file *m, void *v) |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 91d67ce3a8d5..c081fa967c8f 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
| @@ -96,8 +96,13 @@ static int graph_lock(void) | |||
| 96 | 96 | ||
| 97 | static inline int graph_unlock(void) | 97 | static inline int graph_unlock(void) |
| 98 | { | 98 | { |
| 99 | if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) | 99 | if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { |
| 100 | /* | ||
| 101 | * The lockdep graph lock isn't locked while we expect it to | ||
| 102 | * be, we're confused now, bye! | ||
| 103 | */ | ||
| 100 | return DEBUG_LOCKS_WARN_ON(1); | 104 | return DEBUG_LOCKS_WARN_ON(1); |
| 105 | } | ||
| 101 | 106 | ||
| 102 | current->lockdep_recursion--; | 107 | current->lockdep_recursion--; |
| 103 | arch_spin_unlock(&lockdep_lock); | 108 | arch_spin_unlock(&lockdep_lock); |
| @@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; | |||
| 134 | static inline struct lock_class *hlock_class(struct held_lock *hlock) | 139 | static inline struct lock_class *hlock_class(struct held_lock *hlock) |
| 135 | { | 140 | { |
| 136 | if (!hlock->class_idx) { | 141 | if (!hlock->class_idx) { |
| 142 | /* | ||
| 143 | * Someone passed in garbage, we give up. | ||
| 144 | */ | ||
| 137 | DEBUG_LOCKS_WARN_ON(1); | 145 | DEBUG_LOCKS_WARN_ON(1); |
| 138 | return NULL; | 146 | return NULL; |
| 139 | } | 147 | } |
| @@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 687 | */ | 695 | */ |
| 688 | list_for_each_entry(class, hash_head, hash_entry) { | 696 | list_for_each_entry(class, hash_head, hash_entry) { |
| 689 | if (class->key == key) { | 697 | if (class->key == key) { |
| 698 | /* | ||
| 699 | * Huh! same key, different name? Did someone trample | ||
| 700 | * on some memory? We're most confused. | ||
| 701 | */ | ||
| 690 | WARN_ON_ONCE(class->name != lock->name); | 702 | WARN_ON_ONCE(class->name != lock->name); |
| 691 | return class; | 703 | return class; |
| 692 | } | 704 | } |
| @@ -800,6 +812,10 @@ out_unlock_set: | |||
| 800 | else if (subclass < NR_LOCKDEP_CACHING_CLASSES) | 812 | else if (subclass < NR_LOCKDEP_CACHING_CLASSES) |
| 801 | lock->class_cache[subclass] = class; | 813 | lock->class_cache[subclass] = class; |
| 802 | 814 | ||
| 815 | /* | ||
| 816 | * Hash collision, did we smoke some? We found a class with a matching | ||
| 817 | * hash but the subclass -- which is hashed in -- didn't match. | ||
| 818 | */ | ||
| 803 | if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) | 819 | if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) |
| 804 | return NULL; | 820 | return NULL; |
| 805 | 821 | ||
| @@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock, | |||
| 926 | unsigned long nr; | 942 | unsigned long nr; |
| 927 | 943 | ||
| 928 | nr = lock - list_entries; | 944 | nr = lock - list_entries; |
| 929 | WARN_ON(nr >= nr_list_entries); | 945 | WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ |
| 930 | lock->parent = parent; | 946 | lock->parent = parent; |
| 931 | lock->class->dep_gen_id = lockdep_dependency_gen_id; | 947 | lock->class->dep_gen_id = lockdep_dependency_gen_id; |
| 932 | } | 948 | } |
| @@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock) | |||
| 936 | unsigned long nr; | 952 | unsigned long nr; |
| 937 | 953 | ||
| 938 | nr = lock - list_entries; | 954 | nr = lock - list_entries; |
| 939 | WARN_ON(nr >= nr_list_entries); | 955 | WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ |
| 940 | return lock->class->dep_gen_id == lockdep_dependency_gen_id; | 956 | return lock->class->dep_gen_id == lockdep_dependency_gen_id; |
| 941 | } | 957 | } |
| 942 | 958 | ||
| @@ -1196,6 +1212,9 @@ static noinline int print_bfs_bug(int ret) | |||
| 1196 | if (!debug_locks_off_graph_unlock()) | 1212 | if (!debug_locks_off_graph_unlock()) |
| 1197 | return 0; | 1213 | return 0; |
| 1198 | 1214 | ||
| 1215 | /* | ||
| 1216 | * Breadth-first-search failed, graph got corrupted? | ||
| 1217 | */ | ||
| 1199 | WARN(1, "lockdep bfs error:%d\n", ret); | 1218 | WARN(1, "lockdep bfs error:%d\n", ret); |
| 1200 | 1219 | ||
| 1201 | return 0; | 1220 | return 0; |
| @@ -1944,6 +1963,11 @@ out_bug: | |||
| 1944 | if (!debug_locks_off_graph_unlock()) | 1963 | if (!debug_locks_off_graph_unlock()) |
| 1945 | return 0; | 1964 | return 0; |
| 1946 | 1965 | ||
| 1966 | /* | ||
| 1967 | * Clearly none of us should be here, but since we made it we | ||
| 1968 | * can reliably say we messed up our state. See the above two | ||
| 1969 | * gotos for reasons why we could possibly end up here. | ||
| 1970 | */ | ||
| 1947 | WARN_ON(1); | 1971 | WARN_ON(1); |
| 1948 | 1972 | ||
| 1949 | return 0; | 1973 | return 0; |
| @@ -1975,6 +1999,11 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
| 1975 | struct held_lock *hlock_curr, *hlock_next; | 1999 | struct held_lock *hlock_curr, *hlock_next; |
| 1976 | int i, j; | 2000 | int i, j; |
| 1977 | 2001 | ||
| 2002 | /* | ||
| 2003 | * We might need to take the graph lock, ensure we've got IRQs | ||
| 2004 | * disabled to make this an IRQ-safe lock.. for recursion reasons | ||
| 2005 | * lockdep won't complain about its own locking errors. | ||
| 2006 | */ | ||
| 1978 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2007 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 1979 | return 0; | 2008 | return 0; |
| 1980 | /* | 2009 | /* |
| @@ -2126,6 +2155,10 @@ static void check_chain_key(struct task_struct *curr) | |||
| 2126 | hlock = curr->held_locks + i; | 2155 | hlock = curr->held_locks + i; |
| 2127 | if (chain_key != hlock->prev_chain_key) { | 2156 | if (chain_key != hlock->prev_chain_key) { |
| 2128 | debug_locks_off(); | 2157 | debug_locks_off(); |
| 2158 | /* | ||
| 2159 | * We got mighty confused: our chain keys don't match | ||
| 2160 | * what we expect; did someone trample on our task state? | ||
| 2161 | */ | ||
| 2129 | WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", | 2162 | WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", |
| 2130 | curr->lockdep_depth, i, | 2163 | curr->lockdep_depth, i, |
| 2131 | (unsigned long long)chain_key, | 2164 | (unsigned long long)chain_key, |
| @@ -2133,6 +2166,9 @@ static void check_chain_key(struct task_struct *curr) | |||
| 2133 | return; | 2166 | return; |
| 2134 | } | 2167 | } |
| 2135 | id = hlock->class_idx - 1; | 2168 | id = hlock->class_idx - 1; |
| 2169 | /* | ||
| 2170 | * Whoops, ran out of static storage again? | ||
| 2171 | */ | ||
| 2136 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 2172 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
| 2137 | return; | 2173 | return; |
| 2138 | 2174 | ||
| @@ -2144,6 +2180,10 @@ static void check_chain_key(struct task_struct *curr) | |||
| 2144 | } | 2180 | } |
| 2145 | if (chain_key != curr->curr_chain_key) { | 2181 | if (chain_key != curr->curr_chain_key) { |
| 2146 | debug_locks_off(); | 2182 | debug_locks_off(); |
| 2183 | /* | ||
| 2184 | * The cumulative chain key we recomputed doesn't match the one | ||
| 2185 | * we stored; I bet a pink elephant stepped on my memory. | ||
| 2186 | */ | ||
| 2147 | WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", | 2187 | WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", |
| 2148 | curr->lockdep_depth, i, | 2188 | curr->lockdep_depth, i, |
| 2149 | (unsigned long long)chain_key, | 2189 | (unsigned long long)chain_key, |
| @@ -2525,12 +2565,24 @@ void trace_hardirqs_on_caller(unsigned long ip) | |||
| 2525 | return; | 2565 | return; |
| 2526 | } | 2566 | } |
| 2527 | 2567 | ||
| 2568 | /* | ||
| 2569 | * We're enabling irqs and according to our state above irqs weren't | ||
| 2570 | * already enabled, yet we find the hardware thinks they are in fact | ||
| 2571 | * enabled.. someone messed up their IRQ state tracing. | ||
| 2572 | */ | ||
| 2528 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2573 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2529 | return; | 2574 | return; |
| 2530 | 2575 | ||
| 2576 | /* | ||
| 2577 | * See the fine text that goes along with this variable definition. | ||
| 2578 | */ | ||
| 2531 | if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) | 2579 | if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) |
| 2532 | return; | 2580 | return; |
| 2533 | 2581 | ||
| 2582 | /* | ||
| 2583 | * Can't allow enabling interrupts while in an interrupt handler, | ||
| 2584 | * that's general bad form and such. Recursion, limited stack etc.. | ||
| 2585 | */ | ||
| 2534 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | 2586 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) |
| 2535 | return; | 2587 | return; |
| 2536 | 2588 | ||
| @@ -2558,6 +2610,10 @@ void trace_hardirqs_off_caller(unsigned long ip) | |||
| 2558 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 2610 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2559 | return; | 2611 | return; |
| 2560 | 2612 | ||
| 2613 | /* | ||
| 2614 | * So we're supposed to get called after you mask local IRQs, but for | ||
| 2615 | * some reason the hardware doesn't quite think you did a proper job. | ||
| 2616 | */ | ||
| 2561 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2617 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2562 | return; | 2618 | return; |
| 2563 | 2619 | ||
| @@ -2590,6 +2646,10 @@ void trace_softirqs_on(unsigned long ip) | |||
| 2590 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 2646 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2591 | return; | 2647 | return; |
| 2592 | 2648 | ||
| 2649 | /* | ||
| 2650 | * We expect IRQs to be disabled here (see softirq.c); it | ||
| 2651 | * avoids funny state and nesting issues. | ||
| 2652 | */ | ||
| 2593 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2653 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2594 | return; | 2654 | return; |
| 2595 | 2655 | ||
| @@ -2626,6 +2686,9 @@ void trace_softirqs_off(unsigned long ip) | |||
| 2626 | if (unlikely(!debug_locks || current->lockdep_recursion)) | 2686 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2627 | return; | 2687 | return; |
| 2628 | 2688 | ||
| 2689 | /* | ||
| 2690 | * We expect IRQs to be disabled here, see softirq.c | ||
| 2691 | */ | ||
| 2629 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2692 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2630 | return; | 2693 | return; |
| 2631 | 2694 | ||
| @@ -2637,6 +2700,9 @@ void trace_softirqs_off(unsigned long ip) | |||
| 2637 | curr->softirq_disable_ip = ip; | 2700 | curr->softirq_disable_ip = ip; |
| 2638 | curr->softirq_disable_event = ++curr->irq_events; | 2701 | curr->softirq_disable_event = ++curr->irq_events; |
| 2639 | debug_atomic_inc(softirqs_off_events); | 2702 | debug_atomic_inc(softirqs_off_events); |
| 2703 | /* | ||
| 2704 | * Whoops, we wanted softirqs off, so why aren't they? | ||
| 2705 | */ | ||
| 2640 | DEBUG_LOCKS_WARN_ON(!softirq_count()); | 2706 | DEBUG_LOCKS_WARN_ON(!softirq_count()); |
| 2641 | } else | 2707 | } else |
| 2642 | debug_atomic_inc(redundant_softirqs_off); | 2708 | debug_atomic_inc(redundant_softirqs_off); |
| @@ -2661,6 +2727,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) | |||
| 2661 | if (!(gfp_mask & __GFP_FS)) | 2727 | if (!(gfp_mask & __GFP_FS)) |
| 2662 | return; | 2728 | return; |
| 2663 | 2729 | ||
| 2730 | /* | ||
| 2731 | * Oi! Can't be having __GFP_FS allocations with IRQs disabled. | ||
| 2732 | */ | ||
| 2664 | if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) | 2733 | if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) |
| 2665 | return; | 2734 | return; |
| 2666 | 2735 | ||
| @@ -2773,13 +2842,13 @@ static int separate_irq_context(struct task_struct *curr, | |||
| 2773 | return 0; | 2842 | return 0; |
| 2774 | } | 2843 | } |
| 2775 | 2844 | ||
| 2776 | #else | 2845 | #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ |
| 2777 | 2846 | ||
| 2778 | static inline | 2847 | static inline |
| 2779 | int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | 2848 | int mark_lock_irq(struct task_struct *curr, struct held_lock *this, |
| 2780 | enum lock_usage_bit new_bit) | 2849 | enum lock_usage_bit new_bit) |
| 2781 | { | 2850 | { |
| 2782 | WARN_ON(1); | 2851 | WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS */
| 2783 | return 1; | 2852 | return 1; |
| 2784 | } | 2853 | } |
| 2785 | 2854 | ||
| @@ -2799,7 +2868,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask) | |||
| 2799 | { | 2868 | { |
| 2800 | } | 2869 | } |
| 2801 | 2870 | ||
| 2802 | #endif | 2871 | #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ |
| 2803 | 2872 | ||
| 2804 | /* | 2873 | /* |
| 2805 | * Mark a lock with a usage bit, and validate the state transition: | 2874 | * Mark a lock with a usage bit, and validate the state transition: |
| @@ -2880,6 +2949,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 2880 | lock->cpu = raw_smp_processor_id(); | 2949 | lock->cpu = raw_smp_processor_id(); |
| 2881 | #endif | 2950 | #endif |
| 2882 | 2951 | ||
| 2952 | /* | ||
| 2953 | * Can't be having no nameless bastards around this place! | ||
| 2954 | */ | ||
| 2883 | if (DEBUG_LOCKS_WARN_ON(!name)) { | 2955 | if (DEBUG_LOCKS_WARN_ON(!name)) { |
| 2884 | lock->name = "NULL"; | 2956 | lock->name = "NULL"; |
| 2885 | return; | 2957 | return; |
| @@ -2887,6 +2959,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 2887 | 2959 | ||
| 2888 | lock->name = name; | 2960 | lock->name = name; |
| 2889 | 2961 | ||
| 2962 | /* | ||
| 2963 | * No key, no joy, we need to hash something. | ||
| 2964 | */ | ||
| 2890 | if (DEBUG_LOCKS_WARN_ON(!key)) | 2965 | if (DEBUG_LOCKS_WARN_ON(!key)) |
| 2891 | return; | 2966 | return; |
| 2892 | /* | 2967 | /* |
| @@ -2894,6 +2969,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 2894 | */ | 2969 | */ |
| 2895 | if (!static_obj(key)) { | 2970 | if (!static_obj(key)) { |
| 2896 | printk("BUG: key %p not in .data!\n", key); | 2971 | printk("BUG: key %p not in .data!\n", key); |
| 2972 | /* | ||
| 2973 | * What it says above ^^^^^, I suggest you read it. | ||
| 2974 | */ | ||
| 2897 | DEBUG_LOCKS_WARN_ON(1); | 2975 | DEBUG_LOCKS_WARN_ON(1); |
| 2898 | return; | 2976 | return; |
| 2899 | } | 2977 | } |
| @@ -2932,6 +3010,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 2932 | if (unlikely(!debug_locks)) | 3010 | if (unlikely(!debug_locks)) |
| 2933 | return 0; | 3011 | return 0; |
| 2934 | 3012 | ||
| 3013 | /* | ||
| 3014 | * Lockdep should run with IRQs disabled, otherwise we could | ||
| 3015 | * get an interrupt which would want to take locks, which would | ||
| 3016 | * end up in lockdep and have you got a head-ache already? | ||
| 3017 | */ | ||
| 2935 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 3018 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2936 | return 0; | 3019 | return 0; |
| 2937 | 3020 | ||
| @@ -2963,6 +3046,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 2963 | * dependency checks are done) | 3046 | * dependency checks are done) |
| 2964 | */ | 3047 | */ |
| 2965 | depth = curr->lockdep_depth; | 3048 | depth = curr->lockdep_depth; |
| 3049 | /* | ||
| 3050 | * Ran out of static storage for our per-task lock stack again, have we? | ||
| 3051 | */ | ||
| 2966 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) | 3052 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) |
| 2967 | return 0; | 3053 | return 0; |
| 2968 | 3054 | ||
| @@ -2981,6 +3067,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 2981 | } | 3067 | } |
| 2982 | 3068 | ||
| 2983 | hlock = curr->held_locks + depth; | 3069 | hlock = curr->held_locks + depth; |
| 3070 | /* | ||
| 3071 | * Plain impossible: we just registered this class and checked | ||
| 3072 | * it wasn't NULL.. I bet this mushroom I ate was good! | ||
| 3073 | */ | ||
| 2984 | if (DEBUG_LOCKS_WARN_ON(!class)) | 3074 | if (DEBUG_LOCKS_WARN_ON(!class)) |
| 2985 | return 0; | 3075 | return 0; |
| 2986 | hlock->class_idx = class_idx; | 3076 | hlock->class_idx = class_idx; |
| @@ -3015,11 +3105,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3015 | * the hash, not class->key. | 3105 | * the hash, not class->key. |
| 3016 | */ | 3106 | */ |
| 3017 | id = class - lock_classes; | 3107 | id = class - lock_classes; |
| 3108 | /* | ||
| 3109 | * We ran out of our static lock-class allocation again. | ||
| 3110 | */ | ||
| 3018 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 3111 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
| 3019 | return 0; | 3112 | return 0; |
| 3020 | 3113 | ||
| 3021 | chain_key = curr->curr_chain_key; | 3114 | chain_key = curr->curr_chain_key; |
| 3022 | if (!depth) { | 3115 | if (!depth) { |
| 3116 | /* | ||
| 3117 | * No locks held means no keys, so the chain hash must still be zero. | ||
| 3118 | */ | ||
| 3023 | if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) | 3119 | if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) |
| 3024 | return 0; | 3120 | return 0; |
| 3025 | chain_head = 1; | 3121 | chain_head = 1; |
| @@ -3091,6 +3187,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | |||
| 3091 | { | 3187 | { |
| 3092 | if (unlikely(!debug_locks)) | 3188 | if (unlikely(!debug_locks)) |
| 3093 | return 0; | 3189 | return 0; |
| 3190 | /* | ||
| 3191 | * Lockdep should run with IRQs disabled here too; see __lock_acquire(). | ||
| 3192 | */ | ||
| 3094 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 3193 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 3095 | return 0; | 3194 | return 0; |
| 3096 | 3195 | ||
| @@ -3120,6 +3219,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) | |||
| 3120 | if (!class) | 3219 | if (!class) |
| 3121 | return 0; | 3220 | return 0; |
| 3122 | 3221 | ||
| 3222 | /* | ||
| 3223 | * References, but not a lock we're actually ref-counting? | ||
| 3224 | * State got messed up, follow the sites that change ->references | ||
| 3225 | * and try to make sense of it. | ||
| 3226 | */ | ||
| 3123 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) | 3227 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) |
| 3124 | return 0; | 3228 | return 0; |
| 3125 | 3229 | ||
| @@ -3142,6 +3246,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name, | |||
| 3142 | int i; | 3246 | int i; |
| 3143 | 3247 | ||
| 3144 | depth = curr->lockdep_depth; | 3248 | depth = curr->lockdep_depth; |
| 3249 | /* | ||
| 3250 | * This function is about (re)setting the class of a held lock, | ||
| 3251 | * yet we're not actually holding any locks at all. | ||
| 3252 | */ | ||
| 3145 | if (DEBUG_LOCKS_WARN_ON(!depth)) | 3253 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3146 | return 0; | 3254 | return 0; |
| 3147 | 3255 | ||
| @@ -3177,6 +3285,10 @@ found_it: | |||
| 3177 | return 0; | 3285 | return 0; |
| 3178 | } | 3286 | } |
| 3179 | 3287 | ||
| 3288 | /* | ||
| 3289 | * We rebuilt the held-lock stack, yet the depth changed: | ||
| 3290 | * entries must have been lost or duplicated on the way. | ||
| 3291 | */ | ||
| 3180 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) | 3292 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) |
| 3181 | return 0; | 3293 | return 0; |
| 3182 | return 1; | 3294 | return 1; |
| @@ -3201,6 +3313,10 @@ lock_release_non_nested(struct task_struct *curr, | |||
| 3201 | * of held locks: | 3313 | * of held locks: |
| 3202 | */ | 3314 | */ |
| 3203 | depth = curr->lockdep_depth; | 3315 | depth = curr->lockdep_depth; |
| 3316 | /* | ||
| 3317 | * We're asked to release this lock, yet we don't appear to | ||
| 3318 | * be holding any locks at all. | ||
| 3319 | */ | ||
| 3204 | if (DEBUG_LOCKS_WARN_ON(!depth)) | 3320 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3205 | return 0; | 3321 | return 0; |
| 3206 | 3322 | ||
| @@ -3253,6 +3369,10 @@ found_it: | |||
| 3253 | return 0; | 3369 | return 0; |
| 3254 | } | 3370 | } |
| 3255 | 3371 | ||
| 3372 | /* | ||
| 3373 | * We released exactly one lock, yet the held-lock depth | ||
| 3374 | * did not drop by exactly one... | ||
| 3375 | */ | ||
| 3256 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) | 3376 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) |
| 3257 | return 0; | 3377 | return 0; |
| 3258 | return 1; | 3378 | return 1; |
| @@ -3283,6 +3403,9 @@ static int lock_release_nested(struct task_struct *curr, | |||
| 3283 | return lock_release_non_nested(curr, lock, ip); | 3403 | return lock_release_non_nested(curr, lock, ip); |
| 3284 | curr->lockdep_depth--; | 3404 | curr->lockdep_depth--; |
| 3285 | 3405 | ||
| 3406 | /* | ||
| 3407 | * No more locks held, so the previous chain key must be zero. | ||
| 3408 | */ | ||
| 3286 | if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) | 3409 | if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) |
| 3287 | return 0; | 3410 | return 0; |
| 3288 | 3411 | ||
| @@ -3365,10 +3488,13 @@ static void check_flags(unsigned long flags) | |||
| 3365 | * check if not in hardirq contexts: | 3488 | * check if not in hardirq contexts: |
| 3366 | */ | 3489 | */ |
| 3367 | if (!hardirq_count()) { | 3490 | if (!hardirq_count()) { |
| 3368 | if (softirq_count()) | 3491 | if (softirq_count()) { |
| 3492 | /* like the above, but with softirqs */ | ||
| 3369 | DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); | 3493 | DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); |
| 3370 | else | 3494 | } else { |
| 3495 | /* like the above, but inverted: softirqs should be enabled */ | ||
| 3371 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); | 3496 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); |
| 3497 | } | ||
| 3372 | } | 3498 | } |
| 3373 | 3499 | ||
| 3374 | if (!debug_locks) | 3500 | if (!debug_locks) |
| @@ -3506,6 +3632,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
| 3506 | int i, contention_point, contending_point; | 3632 | int i, contention_point, contending_point; |
| 3507 | 3633 | ||
| 3508 | depth = curr->lockdep_depth; | 3634 | depth = curr->lockdep_depth; |
| 3635 | /* | ||
| 3636 | * We contended on this lock, yet we don't appear to be | ||
| 3637 | * trying to acquire anything at all.. | ||
| 3638 | */ | ||
| 3509 | if (DEBUG_LOCKS_WARN_ON(!depth)) | 3639 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3510 | return; | 3640 | return; |
| 3511 | 3641 | ||
| @@ -3555,6 +3685,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) | |||
| 3555 | int i, cpu; | 3685 | int i, cpu; |
| 3556 | 3686 | ||
| 3557 | depth = curr->lockdep_depth; | 3687 | depth = curr->lockdep_depth; |
| 3688 | /* | ||
| 3689 | * We acquired a lock we never recorded an attempt to | ||
| 3690 | * acquire; how did that happen? | ||
| 3691 | */ | ||
| 3558 | if (DEBUG_LOCKS_WARN_ON(!depth)) | 3692 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3559 | return; | 3693 | return; |
| 3560 | 3694 | ||
| @@ -3759,8 +3893,12 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
| 3759 | match |= class == lock->class_cache[j]; | 3893 | match |= class == lock->class_cache[j]; |
| 3760 | 3894 | ||
| 3761 | if (unlikely(match)) { | 3895 | if (unlikely(match)) { |
| 3762 | if (debug_locks_off_graph_unlock()) | 3896 | if (debug_locks_off_graph_unlock()) { |
| 3897 | /* | ||
| 3898 | * We just reset every class; nothing should match anymore. | ||
| 3899 | */ | ||
| 3763 | WARN_ON(1); | 3900 | WARN_ON(1); |
| 3901 | } | ||
| 3764 | goto out_restore; | 3902 | goto out_restore; |
| 3765 | } | 3903 | } |
| 3766 | } | 3904 | } |
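Every annotation added above funnels through DEBUG_LOCKS_WARN_ON(), which fires at most once and then shuts the whole lock-debugging machinery down, so corrupted state cannot recurse or flood the console. A rough sketch of the idea, not the exact macro (the real one lives in include/linux/debug_locks.h and also accounts for oops_in_progress and debug_locks_silent):

	#define DEBUG_LOCKS_WARN_ON(c)					\
	({								\
		int __ret = 0;						\
									\
		if (unlikely(c)) {					\
			/* debug_locks_off() returns 1 only once */	\
			if (debug_locks_off())				\
				WARN_ON(1);				\
			__ret = 1;					\
		}							\
		__ret;							\
	})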
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 640ded8f5c48..e7cb76dc18f5 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -282,13 +282,13 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | |||
| 282 | * it. | 282 | * it. |
| 283 | */ | 283 | */ |
| 284 | thread_group_cputime(tsk, &sum); | 284 | thread_group_cputime(tsk, &sum); |
| 285 | spin_lock_irqsave(&cputimer->lock, flags); | 285 | raw_spin_lock_irqsave(&cputimer->lock, flags); |
| 286 | cputimer->running = 1; | 286 | cputimer->running = 1; |
| 287 | update_gt_cputime(&cputimer->cputime, &sum); | 287 | update_gt_cputime(&cputimer->cputime, &sum); |
| 288 | } else | 288 | } else |
| 289 | spin_lock_irqsave(&cputimer->lock, flags); | 289 | raw_spin_lock_irqsave(&cputimer->lock, flags); |
| 290 | *times = cputimer->cputime; | 290 | *times = cputimer->cputime; |
| 291 | spin_unlock_irqrestore(&cputimer->lock, flags); | 291 | raw_spin_unlock_irqrestore(&cputimer->lock, flags); |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | /* | 294 | /* |
| @@ -999,9 +999,9 @@ static void stop_process_timers(struct signal_struct *sig) | |||
| 999 | struct thread_group_cputimer *cputimer = &sig->cputimer; | 999 | struct thread_group_cputimer *cputimer = &sig->cputimer; |
| 1000 | unsigned long flags; | 1000 | unsigned long flags; |
| 1001 | 1001 | ||
| 1002 | spin_lock_irqsave(&cputimer->lock, flags); | 1002 | raw_spin_lock_irqsave(&cputimer->lock, flags); |
| 1003 | cputimer->running = 0; | 1003 | cputimer->running = 0; |
| 1004 | spin_unlock_irqrestore(&cputimer->lock, flags); | 1004 | raw_spin_unlock_irqrestore(&cputimer->lock, flags); |
| 1005 | } | 1005 | } |
| 1006 | 1006 | ||
| 1007 | static u32 onecputick; | 1007 | static u32 onecputick; |
| @@ -1291,9 +1291,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk) | |||
| 1291 | if (sig->cputimer.running) { | 1291 | if (sig->cputimer.running) { |
| 1292 | struct task_cputime group_sample; | 1292 | struct task_cputime group_sample; |
| 1293 | 1293 | ||
| 1294 | spin_lock(&sig->cputimer.lock); | 1294 | raw_spin_lock(&sig->cputimer.lock); |
| 1295 | group_sample = sig->cputimer.cputime; | 1295 | group_sample = sig->cputimer.cputime; |
| 1296 | spin_unlock(&sig->cputimer.lock); | 1296 | raw_spin_unlock(&sig->cputimer.lock); |
| 1297 | 1297 | ||
| 1298 | if (task_cputime_expired(&group_sample, &sig->cputime_expires)) | 1298 | if (task_cputime_expired(&group_sample, &sig->cputime_expires)) |
| 1299 | return 1; | 1299 | return 1; |
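The motivation for these lock conversions is PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock; locks such as cputimer->lock that are taken from hardirq and scheduler context must therefore remain truly spinning raw_spinlock_t. On mainline the semantics are unchanged, so the conversion is mechanical: the type and every acquire/release gain a raw_ prefix. A minimal sketch of the idiom, with a hypothetical lock/counter pair:

	static DEFINE_RAW_SPINLOCK(my_lock);	/* hypothetical example */
	static u64 my_count;

	static void my_update(u64 delta)
	{
		unsigned long flags;

		/* Spins even on PREEMPT_RT: keep it short, never sleep. */
		raw_spin_lock_irqsave(&my_lock, flags);
		my_count += delta;
		raw_spin_unlock_irqrestore(&my_lock, flags);
	}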
diff --git a/kernel/printk.c b/kernel/printk.c index 28a40d8171b8..b7da18391c38 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -100,7 +100,7 @@ static int console_locked, console_suspended; | |||
| 100 | * It is also used in interesting ways to provide interlocking in | 100 | * It is also used in interesting ways to provide interlocking in |
| 101 | * console_unlock();. | 101 | * console_unlock();. |
| 102 | */ | 102 | */ |
| 103 | static DEFINE_SPINLOCK(logbuf_lock); | 103 | static DEFINE_RAW_SPINLOCK(logbuf_lock); |
| 104 | 104 | ||
| 105 | #define LOG_BUF_MASK (log_buf_len-1) | 105 | #define LOG_BUF_MASK (log_buf_len-1) |
| 106 | #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) | 106 | #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) |
| @@ -212,7 +212,7 @@ void __init setup_log_buf(int early) | |||
| 212 | return; | 212 | return; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | spin_lock_irqsave(&logbuf_lock, flags); | 215 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
| 216 | log_buf_len = new_log_buf_len; | 216 | log_buf_len = new_log_buf_len; |
| 217 | log_buf = new_log_buf; | 217 | log_buf = new_log_buf; |
| 218 | new_log_buf_len = 0; | 218 | new_log_buf_len = 0; |
| @@ -230,7 +230,7 @@ void __init setup_log_buf(int early) | |||
| 230 | log_start -= offset; | 230 | log_start -= offset; |
| 231 | con_start -= offset; | 231 | con_start -= offset; |
| 232 | log_end -= offset; | 232 | log_end -= offset; |
| 233 | spin_unlock_irqrestore(&logbuf_lock, flags); | 233 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
| 234 | 234 | ||
| 235 | pr_info("log_buf_len: %d\n", log_buf_len); | 235 | pr_info("log_buf_len: %d\n", log_buf_len); |
| 236 | pr_info("early log buf free: %d(%d%%)\n", | 236 | pr_info("early log buf free: %d(%d%%)\n", |
| @@ -365,18 +365,18 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
| 365 | if (error) | 365 | if (error) |
| 366 | goto out; | 366 | goto out; |
| 367 | i = 0; | 367 | i = 0; |
| 368 | spin_lock_irq(&logbuf_lock); | 368 | raw_spin_lock_irq(&logbuf_lock); |
| 369 | while (!error && (log_start != log_end) && i < len) { | 369 | while (!error && (log_start != log_end) && i < len) { |
| 370 | c = LOG_BUF(log_start); | 370 | c = LOG_BUF(log_start); |
| 371 | log_start++; | 371 | log_start++; |
| 372 | spin_unlock_irq(&logbuf_lock); | 372 | raw_spin_unlock_irq(&logbuf_lock); |
| 373 | error = __put_user(c,buf); | 373 | error = __put_user(c,buf); |
| 374 | buf++; | 374 | buf++; |
| 375 | i++; | 375 | i++; |
| 376 | cond_resched(); | 376 | cond_resched(); |
| 377 | spin_lock_irq(&logbuf_lock); | 377 | raw_spin_lock_irq(&logbuf_lock); |
| 378 | } | 378 | } |
| 379 | spin_unlock_irq(&logbuf_lock); | 379 | raw_spin_unlock_irq(&logbuf_lock); |
| 380 | if (!error) | 380 | if (!error) |
| 381 | error = i; | 381 | error = i; |
| 382 | break; | 382 | break; |
| @@ -399,7 +399,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
| 399 | count = len; | 399 | count = len; |
| 400 | if (count > log_buf_len) | 400 | if (count > log_buf_len) |
| 401 | count = log_buf_len; | 401 | count = log_buf_len; |
| 402 | spin_lock_irq(&logbuf_lock); | 402 | raw_spin_lock_irq(&logbuf_lock); |
| 403 | if (count > logged_chars) | 403 | if (count > logged_chars) |
| 404 | count = logged_chars; | 404 | count = logged_chars; |
| 405 | if (do_clear) | 405 | if (do_clear) |
| @@ -416,12 +416,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
| 416 | if (j + log_buf_len < log_end) | 416 | if (j + log_buf_len < log_end) |
| 417 | break; | 417 | break; |
| 418 | c = LOG_BUF(j); | 418 | c = LOG_BUF(j); |
| 419 | spin_unlock_irq(&logbuf_lock); | 419 | raw_spin_unlock_irq(&logbuf_lock); |
| 420 | error = __put_user(c,&buf[count-1-i]); | 420 | error = __put_user(c,&buf[count-1-i]); |
| 421 | cond_resched(); | 421 | cond_resched(); |
| 422 | spin_lock_irq(&logbuf_lock); | 422 | raw_spin_lock_irq(&logbuf_lock); |
| 423 | } | 423 | } |
| 424 | spin_unlock_irq(&logbuf_lock); | 424 | raw_spin_unlock_irq(&logbuf_lock); |
| 425 | if (error) | 425 | if (error) |
| 426 | break; | 426 | break; |
| 427 | error = i; | 427 | error = i; |
| @@ -689,7 +689,7 @@ static void zap_locks(void) | |||
| 689 | oops_timestamp = jiffies; | 689 | oops_timestamp = jiffies; |
| 690 | 690 | ||
| 691 | /* If a crash is occurring, make sure we can't deadlock */ | 691 | /* If a crash is occurring, make sure we can't deadlock */ |
| 692 | spin_lock_init(&logbuf_lock); | 692 | raw_spin_lock_init(&logbuf_lock); |
| 693 | /* And make sure that we print immediately */ | 693 | /* And make sure that we print immediately */ |
| 694 | sema_init(&console_sem, 1); | 694 | sema_init(&console_sem, 1); |
| 695 | } | 695 | } |
| @@ -802,9 +802,9 @@ static int console_trylock_for_printk(unsigned int cpu) | |||
| 802 | } | 802 | } |
| 803 | } | 803 | } |
| 804 | printk_cpu = UINT_MAX; | 804 | printk_cpu = UINT_MAX; |
| 805 | spin_unlock(&logbuf_lock); | ||
| 806 | if (wake) | 805 | if (wake) |
| 807 | up(&console_sem); | 806 | up(&console_sem); |
| 807 | raw_spin_unlock(&logbuf_lock); | ||
| 808 | return retval; | 808 | return retval; |
| 809 | } | 809 | } |
| 810 | static const char recursion_bug_msg [] = | 810 | static const char recursion_bug_msg [] = |
| @@ -864,7 +864,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |||
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | lockdep_off(); | 866 | lockdep_off(); |
| 867 | spin_lock(&logbuf_lock); | 867 | raw_spin_lock(&logbuf_lock); |
| 868 | printk_cpu = this_cpu; | 868 | printk_cpu = this_cpu; |
| 869 | 869 | ||
| 870 | if (recursion_bug) { | 870 | if (recursion_bug) { |
| @@ -1257,14 +1257,14 @@ void console_unlock(void) | |||
| 1257 | 1257 | ||
| 1258 | again: | 1258 | again: |
| 1259 | for ( ; ; ) { | 1259 | for ( ; ; ) { |
| 1260 | spin_lock_irqsave(&logbuf_lock, flags); | 1260 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
| 1261 | wake_klogd |= log_start - log_end; | 1261 | wake_klogd |= log_start - log_end; |
| 1262 | if (con_start == log_end) | 1262 | if (con_start == log_end) |
| 1263 | break; /* Nothing to print */ | 1263 | break; /* Nothing to print */ |
| 1264 | _con_start = con_start; | 1264 | _con_start = con_start; |
| 1265 | _log_end = log_end; | 1265 | _log_end = log_end; |
| 1266 | con_start = log_end; /* Flush */ | 1266 | con_start = log_end; /* Flush */ |
| 1267 | spin_unlock(&logbuf_lock); | 1267 | raw_spin_unlock(&logbuf_lock); |
| 1268 | stop_critical_timings(); /* don't trace print latency */ | 1268 | stop_critical_timings(); /* don't trace print latency */ |
| 1269 | call_console_drivers(_con_start, _log_end); | 1269 | call_console_drivers(_con_start, _log_end); |
| 1270 | start_critical_timings(); | 1270 | start_critical_timings(); |
| @@ -1276,7 +1276,7 @@ again: | |||
| 1276 | if (unlikely(exclusive_console)) | 1276 | if (unlikely(exclusive_console)) |
| 1277 | exclusive_console = NULL; | 1277 | exclusive_console = NULL; |
| 1278 | 1278 | ||
| 1279 | spin_unlock(&logbuf_lock); | 1279 | raw_spin_unlock(&logbuf_lock); |
| 1280 | 1280 | ||
| 1281 | up(&console_sem); | 1281 | up(&console_sem); |
| 1282 | 1282 | ||
| @@ -1286,13 +1286,13 @@ again: | |||
| 1286 | * there's a new owner and the console_unlock() from them will do the | 1286 | * there's a new owner and the console_unlock() from them will do the |
| 1287 | * flush, no worries. | 1287 | * flush, no worries. |
| 1288 | */ | 1288 | */ |
| 1289 | spin_lock(&logbuf_lock); | 1289 | raw_spin_lock(&logbuf_lock); |
| 1290 | if (con_start != log_end) | 1290 | if (con_start != log_end) |
| 1291 | retry = 1; | 1291 | retry = 1; |
| 1292 | spin_unlock_irqrestore(&logbuf_lock, flags); | ||
| 1293 | if (retry && console_trylock()) | 1292 | if (retry && console_trylock()) |
| 1294 | goto again; | 1293 | goto again; |
| 1295 | 1294 | ||
| 1295 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
| 1296 | if (wake_klogd) | 1296 | if (wake_klogd) |
| 1297 | wake_up_klogd(); | 1297 | wake_up_klogd(); |
| 1298 | } | 1298 | } |
| @@ -1522,9 +1522,9 @@ void register_console(struct console *newcon) | |||
| 1522 | * console_unlock(); will print out the buffered messages | 1522 | * console_unlock(); will print out the buffered messages |
| 1523 | * for us. | 1523 | * for us. |
| 1524 | */ | 1524 | */ |
| 1525 | spin_lock_irqsave(&logbuf_lock, flags); | 1525 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
| 1526 | con_start = log_start; | 1526 | con_start = log_start; |
| 1527 | spin_unlock_irqrestore(&logbuf_lock, flags); | 1527 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
| 1528 | /* | 1528 | /* |
| 1529 | * We're about to replay the log buffer. Only do this to the | 1529 | * We're about to replay the log buffer. Only do this to the |
| 1530 | * just-registered console to avoid excessive message spam to | 1530 | * just-registered console to avoid excessive message spam to |
| @@ -1731,10 +1731,10 @@ void kmsg_dump(enum kmsg_dump_reason reason) | |||
| 1731 | /* Theoretically, the log could move on after we do this, but | 1731 | /* Theoretically, the log could move on after we do this, but |
| 1732 | there's not a lot we can do about that. The new messages | 1732 | there's not a lot we can do about that. The new messages |
| 1733 | will overwrite the start of what we dump. */ | 1733 | will overwrite the start of what we dump. */ |
| 1734 | spin_lock_irqsave(&logbuf_lock, flags); | 1734 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
| 1735 | end = log_end & LOG_BUF_MASK; | 1735 | end = log_end & LOG_BUF_MASK; |
| 1736 | chars = logged_chars; | 1736 | chars = logged_chars; |
| 1737 | spin_unlock_irqrestore(&logbuf_lock, flags); | 1737 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
| 1738 | 1738 | ||
| 1739 | if (chars > end) { | 1739 | if (chars > end) { |
| 1740 | s1 = log_buf + log_buf_len - chars + end; | 1740 | s1 = log_buf + log_buf_len - chars + end; |
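The logbuf indices (log_start, con_start, log_end) are free-running counters that are only masked into the buffer on access, which is why the code above compares them directly and why log_buf_len must be a power of two. Simplified producer side, using the macros from this file:

	#define LOG_BUF_MASK (log_buf_len - 1)	/* log_buf_len: power of 2 */
	#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])

	/* with logbuf_lock held: */
	LOG_BUF(log_end) = c;
	log_end++;			/* wraps via the mask, never reset */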
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 3c7cbc2c33be..a2e7e7210f3e 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c | |||
| @@ -29,61 +29,6 @@ | |||
| 29 | 29 | ||
| 30 | #include "rtmutex_common.h" | 30 | #include "rtmutex_common.h" |
| 31 | 31 | ||
| 32 | # define TRACE_WARN_ON(x) WARN_ON(x) | ||
| 33 | # define TRACE_BUG_ON(x) BUG_ON(x) | ||
| 34 | |||
| 35 | # define TRACE_OFF() \ | ||
| 36 | do { \ | ||
| 37 | if (rt_trace_on) { \ | ||
| 38 | rt_trace_on = 0; \ | ||
| 39 | console_verbose(); \ | ||
| 40 | if (raw_spin_is_locked(¤t->pi_lock)) \ | ||
| 41 | raw_spin_unlock(¤t->pi_lock); \ | ||
| 42 | } \ | ||
| 43 | } while (0) | ||
| 44 | |||
| 45 | # define TRACE_OFF_NOLOCK() \ | ||
| 46 | do { \ | ||
| 47 | if (rt_trace_on) { \ | ||
| 48 | rt_trace_on = 0; \ | ||
| 49 | console_verbose(); \ | ||
| 50 | } \ | ||
| 51 | } while (0) | ||
| 52 | |||
| 53 | # define TRACE_BUG_LOCKED() \ | ||
| 54 | do { \ | ||
| 55 | TRACE_OFF(); \ | ||
| 56 | BUG(); \ | ||
| 57 | } while (0) | ||
| 58 | |||
| 59 | # define TRACE_WARN_ON_LOCKED(c) \ | ||
| 60 | do { \ | ||
| 61 | if (unlikely(c)) { \ | ||
| 62 | TRACE_OFF(); \ | ||
| 63 | WARN_ON(1); \ | ||
| 64 | } \ | ||
| 65 | } while (0) | ||
| 66 | |||
| 67 | # define TRACE_BUG_ON_LOCKED(c) \ | ||
| 68 | do { \ | ||
| 69 | if (unlikely(c)) \ | ||
| 70 | TRACE_BUG_LOCKED(); \ | ||
| 71 | } while (0) | ||
| 72 | |||
| 73 | #ifdef CONFIG_SMP | ||
| 74 | # define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c) | ||
| 75 | #else | ||
| 76 | # define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0) | ||
| 77 | #endif | ||
| 78 | |||
| 79 | /* | ||
| 80 | * deadlock detection flag. We turn it off when we detect | ||
| 81 | * the first problem because we dont want to recurse back | ||
| 82 | * into the tracing code when doing error printk or | ||
| 83 | * executing a BUG(): | ||
| 84 | */ | ||
| 85 | static int rt_trace_on = 1; | ||
| 86 | |||
| 87 | static void printk_task(struct task_struct *p) | 32 | static void printk_task(struct task_struct *p) |
| 88 | { | 33 | { |
| 89 | if (p) | 34 | if (p) |
| @@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex *lock, int print_owner) | |||
| 111 | 56 | ||
| 112 | void rt_mutex_debug_task_free(struct task_struct *task) | 57 | void rt_mutex_debug_task_free(struct task_struct *task) |
| 113 | { | 58 | { |
| 114 | WARN_ON(!plist_head_empty(&task->pi_waiters)); | 59 | DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters)); |
| 115 | WARN_ON(task->pi_blocked_on); | 60 | DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); |
| 116 | } | 61 | } |
| 117 | 62 | ||
| 118 | /* | 63 | /* |
| @@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter, | |||
| 125 | { | 70 | { |
| 126 | struct task_struct *task; | 71 | struct task_struct *task; |
| 127 | 72 | ||
| 128 | if (!rt_trace_on || detect || !act_waiter) | 73 | if (!debug_locks || detect || !act_waiter) |
| 129 | return; | 74 | return; |
| 130 | 75 | ||
| 131 | task = rt_mutex_owner(act_waiter->lock); | 76 | task = rt_mutex_owner(act_waiter->lock); |
| @@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) | |||
| 139 | { | 84 | { |
| 140 | struct task_struct *task; | 85 | struct task_struct *task; |
| 141 | 86 | ||
| 142 | if (!waiter->deadlock_lock || !rt_trace_on) | 87 | if (!waiter->deadlock_lock || !debug_locks) |
| 143 | return; | 88 | return; |
| 144 | 89 | ||
| 145 | rcu_read_lock(); | 90 | rcu_read_lock(); |
| @@ -149,7 +94,10 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) | |||
| 149 | return; | 94 | return; |
| 150 | } | 95 | } |
| 151 | 96 | ||
| 152 | TRACE_OFF_NOLOCK(); | 97 | if (!debug_locks_off()) { |
| 98 | rcu_read_unlock(); | ||
| 99 | return; | ||
| 100 | } | ||
| 153 | 101 | ||
| 154 | printk("\n============================================\n"); | 102 | printk("\n============================================\n"); |
| 155 | printk( "[ BUG: circular locking deadlock detected! ]\n"); | 103 | printk( "[ BUG: circular locking deadlock detected! ]\n"); |
| @@ -180,7 +128,6 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) | |||
| 180 | 128 | ||
| 181 | printk("[ turning off deadlock detection." | 129 | printk("[ turning off deadlock detection." |
| 182 | "Please report this trace. ]\n\n"); | 130 | "Please report this trace. ]\n\n"); |
| 183 | local_irq_disable(); | ||
| 184 | } | 131 | } |
| 185 | 132 | ||
| 186 | void debug_rt_mutex_lock(struct rt_mutex *lock) | 133 | void debug_rt_mutex_lock(struct rt_mutex *lock) |
| @@ -189,7 +136,7 @@ void debug_rt_mutex_lock(struct rt_mutex *lock) | |||
| 189 | 136 | ||
| 190 | void debug_rt_mutex_unlock(struct rt_mutex *lock) | 137 | void debug_rt_mutex_unlock(struct rt_mutex *lock) |
| 191 | { | 138 | { |
| 192 | TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); | 139 | DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); |
| 193 | } | 140 | } |
| 194 | 141 | ||
| 195 | void | 142 | void |
| @@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) | |||
| 199 | 146 | ||
| 200 | void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) | 147 | void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) |
| 201 | { | 148 | { |
| 202 | TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock)); | 149 | DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); |
| 203 | } | 150 | } |
| 204 | 151 | ||
| 205 | void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) | 152 | void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) |
| @@ -213,8 +160,8 @@ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) | |||
| 213 | void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) | 160 | void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) |
| 214 | { | 161 | { |
| 215 | put_pid(waiter->deadlock_task_pid); | 162 | put_pid(waiter->deadlock_task_pid); |
| 216 | TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); | 163 | DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry)); |
| 217 | TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); | 164 | DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); |
| 218 | memset(waiter, 0x22, sizeof(*waiter)); | 165 | memset(waiter, 0x22, sizeof(*waiter)); |
| 219 | } | 166 | } |
| 220 | 167 | ||
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 331e01bcd026..87f9e36ea56e 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
| @@ -282,10 +282,10 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
| 282 | if (!cputimer->running) | 282 | if (!cputimer->running) |
| 283 | return; | 283 | return; |
| 284 | 284 | ||
| 285 | spin_lock(&cputimer->lock); | 285 | raw_spin_lock(&cputimer->lock); |
| 286 | cputimer->cputime.utime = | 286 | cputimer->cputime.utime = |
| 287 | cputime_add(cputimer->cputime.utime, cputime); | 287 | cputime_add(cputimer->cputime.utime, cputime); |
| 288 | spin_unlock(&cputimer->lock); | 288 | raw_spin_unlock(&cputimer->lock); |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | /** | 291 | /** |
| @@ -306,10 +306,10 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
| 306 | if (!cputimer->running) | 306 | if (!cputimer->running) |
| 307 | return; | 307 | return; |
| 308 | 308 | ||
| 309 | spin_lock(&cputimer->lock); | 309 | raw_spin_lock(&cputimer->lock); |
| 310 | cputimer->cputime.stime = | 310 | cputimer->cputime.stime = |
| 311 | cputime_add(cputimer->cputime.stime, cputime); | 311 | cputime_add(cputimer->cputime.stime, cputime); |
| 312 | spin_unlock(&cputimer->lock); | 312 | raw_spin_unlock(&cputimer->lock); |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | /** | 315 | /** |
| @@ -330,7 +330,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, | |||
| 330 | if (!cputimer->running) | 330 | if (!cputimer->running) |
| 331 | return; | 331 | return; |
| 332 | 332 | ||
| 333 | spin_lock(&cputimer->lock); | 333 | raw_spin_lock(&cputimer->lock); |
| 334 | cputimer->cputime.sum_exec_runtime += ns; | 334 | cputimer->cputime.sum_exec_runtime += ns; |
| 335 | spin_unlock(&cputimer->lock); | 335 | raw_spin_unlock(&cputimer->lock); |
| 336 | } | 336 | } |
diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 94a62c0d4ade..d831841e55a7 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c | |||
| @@ -54,12 +54,12 @@ void down(struct semaphore *sem) | |||
| 54 | { | 54 | { |
| 55 | unsigned long flags; | 55 | unsigned long flags; |
| 56 | 56 | ||
| 57 | spin_lock_irqsave(&sem->lock, flags); | 57 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 58 | if (likely(sem->count > 0)) | 58 | if (likely(sem->count > 0)) |
| 59 | sem->count--; | 59 | sem->count--; |
| 60 | else | 60 | else |
| 61 | __down(sem); | 61 | __down(sem); |
| 62 | spin_unlock_irqrestore(&sem->lock, flags); | 62 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 63 | } | 63 | } |
| 64 | EXPORT_SYMBOL(down); | 64 | EXPORT_SYMBOL(down); |
| 65 | 65 | ||
| @@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem) | |||
| 77 | unsigned long flags; | 77 | unsigned long flags; |
| 78 | int result = 0; | 78 | int result = 0; |
| 79 | 79 | ||
| 80 | spin_lock_irqsave(&sem->lock, flags); | 80 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 81 | if (likely(sem->count > 0)) | 81 | if (likely(sem->count > 0)) |
| 82 | sem->count--; | 82 | sem->count--; |
| 83 | else | 83 | else |
| 84 | result = __down_interruptible(sem); | 84 | result = __down_interruptible(sem); |
| 85 | spin_unlock_irqrestore(&sem->lock, flags); | 85 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 86 | 86 | ||
| 87 | return result; | 87 | return result; |
| 88 | } | 88 | } |
| @@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem) | |||
| 103 | unsigned long flags; | 103 | unsigned long flags; |
| 104 | int result = 0; | 104 | int result = 0; |
| 105 | 105 | ||
| 106 | spin_lock_irqsave(&sem->lock, flags); | 106 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 107 | if (likely(sem->count > 0)) | 107 | if (likely(sem->count > 0)) |
| 108 | sem->count--; | 108 | sem->count--; |
| 109 | else | 109 | else |
| 110 | result = __down_killable(sem); | 110 | result = __down_killable(sem); |
| 111 | spin_unlock_irqrestore(&sem->lock, flags); | 111 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 112 | 112 | ||
| 113 | return result; | 113 | return result; |
| 114 | } | 114 | } |
| @@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem) | |||
| 132 | unsigned long flags; | 132 | unsigned long flags; |
| 133 | int count; | 133 | int count; |
| 134 | 134 | ||
| 135 | spin_lock_irqsave(&sem->lock, flags); | 135 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 136 | count = sem->count - 1; | 136 | count = sem->count - 1; |
| 137 | if (likely(count >= 0)) | 137 | if (likely(count >= 0)) |
| 138 | sem->count = count; | 138 | sem->count = count; |
| 139 | spin_unlock_irqrestore(&sem->lock, flags); | 139 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 140 | 140 | ||
| 141 | return (count < 0); | 141 | return (count < 0); |
| 142 | } | 142 | } |
| @@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies) | |||
| 157 | unsigned long flags; | 157 | unsigned long flags; |
| 158 | int result = 0; | 158 | int result = 0; |
| 159 | 159 | ||
| 160 | spin_lock_irqsave(&sem->lock, flags); | 160 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 161 | if (likely(sem->count > 0)) | 161 | if (likely(sem->count > 0)) |
| 162 | sem->count--; | 162 | sem->count--; |
| 163 | else | 163 | else |
| 164 | result = __down_timeout(sem, jiffies); | 164 | result = __down_timeout(sem, jiffies); |
| 165 | spin_unlock_irqrestore(&sem->lock, flags); | 165 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 166 | 166 | ||
| 167 | return result; | 167 | return result; |
| 168 | } | 168 | } |
| @@ -179,12 +179,12 @@ void up(struct semaphore *sem) | |||
| 179 | { | 179 | { |
| 180 | unsigned long flags; | 180 | unsigned long flags; |
| 181 | 181 | ||
| 182 | spin_lock_irqsave(&sem->lock, flags); | 182 | raw_spin_lock_irqsave(&sem->lock, flags); |
| 183 | if (likely(list_empty(&sem->wait_list))) | 183 | if (likely(list_empty(&sem->wait_list))) |
| 184 | sem->count++; | 184 | sem->count++; |
| 185 | else | 185 | else |
| 186 | __up(sem); | 186 | __up(sem); |
| 187 | spin_unlock_irqrestore(&sem->lock, flags); | 187 | raw_spin_unlock_irqrestore(&sem->lock, flags); |
| 188 | } | 188 | } |
| 189 | EXPORT_SYMBOL(up); | 189 | EXPORT_SYMBOL(up); |
| 190 | 190 | ||
| @@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state, | |||
| 217 | if (timeout <= 0) | 217 | if (timeout <= 0) |
| 218 | goto timed_out; | 218 | goto timed_out; |
| 219 | __set_task_state(task, state); | 219 | __set_task_state(task, state); |
| 220 | spin_unlock_irq(&sem->lock); | 220 | raw_spin_unlock_irq(&sem->lock); |
| 221 | timeout = schedule_timeout(timeout); | 221 | timeout = schedule_timeout(timeout); |
| 222 | spin_lock_irq(&sem->lock); | 222 | raw_spin_lock_irq(&sem->lock); |
| 223 | if (waiter.up) | 223 | if (waiter.up) |
| 224 | return 0; | 224 | return 0; |
| 225 | } | 225 | } |
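Note how __down_common() above drops the now-raw lock around schedule_timeout(): sleeping with any spinning lock held would be a bug, so the lock covers only the count and wait-list updates. Callers are unaffected by the conversion; typical usage for reference (my_sem is hypothetical):

	static DEFINE_SEMAPHORE(my_sem);	/* count initialized to 1 */

	if (down_interruptible(&my_sem))
		return -EINTR;		/* signal arrived while waiting */
	/* ... exclusive region, may sleep ... */
	up(&my_sem);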
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index a5d0a3a85dd8..0b537f27b559 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c | |||
| @@ -81,7 +81,7 @@ struct entry { | |||
| 81 | /* | 81 | /* |
| 82 | * Spinlock protecting the tables - not taken during lookup: | 82 | * Spinlock protecting the tables - not taken during lookup: |
| 83 | */ | 83 | */ |
| 84 | static DEFINE_SPINLOCK(table_lock); | 84 | static DEFINE_RAW_SPINLOCK(table_lock); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * Per-CPU lookup locks for fast hash lookup: | 87 | * Per-CPU lookup locks for fast hash lookup: |
| @@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm) | |||
| 188 | prev = NULL; | 188 | prev = NULL; |
| 189 | curr = *head; | 189 | curr = *head; |
| 190 | 190 | ||
| 191 | spin_lock(&table_lock); | 191 | raw_spin_lock(&table_lock); |
| 192 | /* | 192 | /* |
| 193 | * Make sure we have not raced with another CPU: | 193 | * Make sure we have not raced with another CPU: |
| 194 | */ | 194 | */ |
| @@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm) | |||
| 215 | *head = curr; | 215 | *head = curr; |
| 216 | } | 216 | } |
| 217 | out_unlock: | 217 | out_unlock: |
| 218 | spin_unlock(&table_lock); | 218 | raw_spin_unlock(&table_lock); |
| 219 | 219 | ||
| 220 | return curr; | 220 | return curr; |
| 221 | } | 221 | } |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 731201bf4acc..f2f821acc597 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -478,7 +478,7 @@ struct ring_buffer_per_cpu { | |||
| 478 | int cpu; | 478 | int cpu; |
| 479 | atomic_t record_disabled; | 479 | atomic_t record_disabled; |
| 480 | struct ring_buffer *buffer; | 480 | struct ring_buffer *buffer; |
| 481 | spinlock_t reader_lock; /* serialize readers */ | 481 | raw_spinlock_t reader_lock; /* serialize readers */ |
| 482 | arch_spinlock_t lock; | 482 | arch_spinlock_t lock; |
| 483 | struct lock_class_key lock_key; | 483 | struct lock_class_key lock_key; |
| 484 | struct list_head *pages; | 484 | struct list_head *pages; |
| @@ -1062,7 +1062,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
| 1062 | 1062 | ||
| 1063 | cpu_buffer->cpu = cpu; | 1063 | cpu_buffer->cpu = cpu; |
| 1064 | cpu_buffer->buffer = buffer; | 1064 | cpu_buffer->buffer = buffer; |
| 1065 | spin_lock_init(&cpu_buffer->reader_lock); | 1065 | raw_spin_lock_init(&cpu_buffer->reader_lock); |
| 1066 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); | 1066 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); |
| 1067 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 1067 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 1068 | 1068 | ||
| @@ -1259,7 +1259,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
| 1259 | struct list_head *p; | 1259 | struct list_head *p; |
| 1260 | unsigned i; | 1260 | unsigned i; |
| 1261 | 1261 | ||
| 1262 | spin_lock_irq(&cpu_buffer->reader_lock); | 1262 | raw_spin_lock_irq(&cpu_buffer->reader_lock); |
| 1263 | rb_head_page_deactivate(cpu_buffer); | 1263 | rb_head_page_deactivate(cpu_buffer); |
| 1264 | 1264 | ||
| 1265 | for (i = 0; i < nr_pages; i++) { | 1265 | for (i = 0; i < nr_pages; i++) { |
| @@ -1277,7 +1277,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
| 1277 | rb_check_pages(cpu_buffer); | 1277 | rb_check_pages(cpu_buffer); |
| 1278 | 1278 | ||
| 1279 | out: | 1279 | out: |
| 1280 | spin_unlock_irq(&cpu_buffer->reader_lock); | 1280 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); |
| 1281 | } | 1281 | } |
| 1282 | 1282 | ||
| 1283 | static void | 1283 | static void |
| @@ -1288,7 +1288,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1288 | struct list_head *p; | 1288 | struct list_head *p; |
| 1289 | unsigned i; | 1289 | unsigned i; |
| 1290 | 1290 | ||
| 1291 | spin_lock_irq(&cpu_buffer->reader_lock); | 1291 | raw_spin_lock_irq(&cpu_buffer->reader_lock); |
| 1292 | rb_head_page_deactivate(cpu_buffer); | 1292 | rb_head_page_deactivate(cpu_buffer); |
| 1293 | 1293 | ||
| 1294 | for (i = 0; i < nr_pages; i++) { | 1294 | for (i = 0; i < nr_pages; i++) { |
| @@ -1303,7 +1303,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1303 | rb_check_pages(cpu_buffer); | 1303 | rb_check_pages(cpu_buffer); |
| 1304 | 1304 | ||
| 1305 | out: | 1305 | out: |
| 1306 | spin_unlock_irq(&cpu_buffer->reader_lock); | 1306 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); |
| 1307 | } | 1307 | } |
| 1308 | 1308 | ||
| 1309 | /** | 1309 | /** |
| @@ -2804,9 +2804,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
| 2804 | 2804 | ||
| 2805 | cpu_buffer = iter->cpu_buffer; | 2805 | cpu_buffer = iter->cpu_buffer; |
| 2806 | 2806 | ||
| 2807 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2807 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2808 | rb_iter_reset(iter); | 2808 | rb_iter_reset(iter); |
| 2809 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2809 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 2810 | } | 2810 | } |
| 2811 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | 2811 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); |
| 2812 | 2812 | ||
| @@ -3265,12 +3265,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
| 3265 | again: | 3265 | again: |
| 3266 | local_irq_save(flags); | 3266 | local_irq_save(flags); |
| 3267 | if (dolock) | 3267 | if (dolock) |
| 3268 | spin_lock(&cpu_buffer->reader_lock); | 3268 | raw_spin_lock(&cpu_buffer->reader_lock); |
| 3269 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 3269 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
| 3270 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3270 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
| 3271 | rb_advance_reader(cpu_buffer); | 3271 | rb_advance_reader(cpu_buffer); |
| 3272 | if (dolock) | 3272 | if (dolock) |
| 3273 | spin_unlock(&cpu_buffer->reader_lock); | 3273 | raw_spin_unlock(&cpu_buffer->reader_lock); |
| 3274 | local_irq_restore(flags); | 3274 | local_irq_restore(flags); |
| 3275 | 3275 | ||
| 3276 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3276 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
| @@ -3295,9 +3295,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
| 3295 | unsigned long flags; | 3295 | unsigned long flags; |
| 3296 | 3296 | ||
| 3297 | again: | 3297 | again: |
| 3298 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3298 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3299 | event = rb_iter_peek(iter, ts); | 3299 | event = rb_iter_peek(iter, ts); |
| 3300 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3300 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3301 | 3301 | ||
| 3302 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3302 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
| 3303 | goto again; | 3303 | goto again; |
| @@ -3337,7 +3337,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
| 3337 | cpu_buffer = buffer->buffers[cpu]; | 3337 | cpu_buffer = buffer->buffers[cpu]; |
| 3338 | local_irq_save(flags); | 3338 | local_irq_save(flags); |
| 3339 | if (dolock) | 3339 | if (dolock) |
| 3340 | spin_lock(&cpu_buffer->reader_lock); | 3340 | raw_spin_lock(&cpu_buffer->reader_lock); |
| 3341 | 3341 | ||
| 3342 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 3342 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
| 3343 | if (event) { | 3343 | if (event) { |
| @@ -3346,7 +3346,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
| 3346 | } | 3346 | } |
| 3347 | 3347 | ||
| 3348 | if (dolock) | 3348 | if (dolock) |
| 3349 | spin_unlock(&cpu_buffer->reader_lock); | 3349 | raw_spin_unlock(&cpu_buffer->reader_lock); |
| 3350 | local_irq_restore(flags); | 3350 | local_irq_restore(flags); |
| 3351 | 3351 | ||
| 3352 | out: | 3352 | out: |
| @@ -3438,11 +3438,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter) | |||
| 3438 | 3438 | ||
| 3439 | cpu_buffer = iter->cpu_buffer; | 3439 | cpu_buffer = iter->cpu_buffer; |
| 3440 | 3440 | ||
| 3441 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3441 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3442 | arch_spin_lock(&cpu_buffer->lock); | 3442 | arch_spin_lock(&cpu_buffer->lock); |
| 3443 | rb_iter_reset(iter); | 3443 | rb_iter_reset(iter); |
| 3444 | arch_spin_unlock(&cpu_buffer->lock); | 3444 | arch_spin_unlock(&cpu_buffer->lock); |
| 3445 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3445 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3446 | } | 3446 | } |
| 3447 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 3447 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); |
| 3448 | 3448 | ||
| @@ -3477,7 +3477,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
| 3477 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 3477 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 3478 | unsigned long flags; | 3478 | unsigned long flags; |
| 3479 | 3479 | ||
| 3480 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3480 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3481 | again: | 3481 | again: |
| 3482 | event = rb_iter_peek(iter, ts); | 3482 | event = rb_iter_peek(iter, ts); |
| 3483 | if (!event) | 3483 | if (!event) |
| @@ -3488,7 +3488,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
| 3488 | 3488 | ||
| 3489 | rb_advance_iter(iter); | 3489 | rb_advance_iter(iter); |
| 3490 | out: | 3490 | out: |
| 3491 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3491 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3492 | 3492 | ||
| 3493 | return event; | 3493 | return event; |
| 3494 | } | 3494 | } |
| @@ -3557,7 +3557,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
| 3557 | 3557 | ||
| 3558 | atomic_inc(&cpu_buffer->record_disabled); | 3558 | atomic_inc(&cpu_buffer->record_disabled); |
| 3559 | 3559 | ||
| 3560 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3560 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3561 | 3561 | ||
| 3562 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) | 3562 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) |
| 3563 | goto out; | 3563 | goto out; |
| @@ -3569,7 +3569,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
| 3569 | arch_spin_unlock(&cpu_buffer->lock); | 3569 | arch_spin_unlock(&cpu_buffer->lock); |
| 3570 | 3570 | ||
| 3571 | out: | 3571 | out: |
| 3572 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3572 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3573 | 3573 | ||
| 3574 | atomic_dec(&cpu_buffer->record_disabled); | 3574 | atomic_dec(&cpu_buffer->record_disabled); |
| 3575 | } | 3575 | } |
| @@ -3607,10 +3607,10 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
| 3607 | cpu_buffer = buffer->buffers[cpu]; | 3607 | cpu_buffer = buffer->buffers[cpu]; |
| 3608 | local_irq_save(flags); | 3608 | local_irq_save(flags); |
| 3609 | if (dolock) | 3609 | if (dolock) |
| 3610 | spin_lock(&cpu_buffer->reader_lock); | 3610 | raw_spin_lock(&cpu_buffer->reader_lock); |
| 3611 | ret = rb_per_cpu_empty(cpu_buffer); | 3611 | ret = rb_per_cpu_empty(cpu_buffer); |
| 3612 | if (dolock) | 3612 | if (dolock) |
| 3613 | spin_unlock(&cpu_buffer->reader_lock); | 3613 | raw_spin_unlock(&cpu_buffer->reader_lock); |
| 3614 | local_irq_restore(flags); | 3614 | local_irq_restore(flags); |
| 3615 | 3615 | ||
| 3616 | if (!ret) | 3616 | if (!ret) |
| @@ -3641,10 +3641,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
| 3641 | cpu_buffer = buffer->buffers[cpu]; | 3641 | cpu_buffer = buffer->buffers[cpu]; |
| 3642 | local_irq_save(flags); | 3642 | local_irq_save(flags); |
| 3643 | if (dolock) | 3643 | if (dolock) |
| 3644 | spin_lock(&cpu_buffer->reader_lock); | 3644 | raw_spin_lock(&cpu_buffer->reader_lock); |
| 3645 | ret = rb_per_cpu_empty(cpu_buffer); | 3645 | ret = rb_per_cpu_empty(cpu_buffer); |
| 3646 | if (dolock) | 3646 | if (dolock) |
| 3647 | spin_unlock(&cpu_buffer->reader_lock); | 3647 | raw_spin_unlock(&cpu_buffer->reader_lock); |
| 3648 | local_irq_restore(flags); | 3648 | local_irq_restore(flags); |
| 3649 | 3649 | ||
| 3650 | return ret; | 3650 | return ret; |
| @@ -3841,7 +3841,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
| 3841 | if (!bpage) | 3841 | if (!bpage) |
| 3842 | goto out; | 3842 | goto out; |
| 3843 | 3843 | ||
| 3844 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3844 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3845 | 3845 | ||
| 3846 | reader = rb_get_reader_page(cpu_buffer); | 3846 | reader = rb_get_reader_page(cpu_buffer); |
| 3847 | if (!reader) | 3847 | if (!reader) |
| @@ -3964,7 +3964,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
| 3964 | memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); | 3964 | memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); |
| 3965 | 3965 | ||
| 3966 | out_unlock: | 3966 | out_unlock: |
| 3967 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3967 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3968 | 3968 | ||
| 3969 | out: | 3969 | out: |
| 3970 | return ret; | 3970 | return ret; |
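The ring buffer keeps two levels of locking, and the conversion preserves their ordering: the per-cpu reader_lock (now raw) is the outer lock, taken with interrupts disabled, while the low-level arch_spinlock_t nests inside it, as in ring_buffer_read_start() above. Schematically:

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);	/* innermost lock */
	/* ... reposition the reader page ... */
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);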
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e5df02c69b1d..0c8bdeeb358b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | |||
| 341 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE; | 341 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE; |
| 342 | 342 | ||
| 343 | static int trace_stop_count; | 343 | static int trace_stop_count; |
| 344 | static DEFINE_SPINLOCK(tracing_start_lock); | 344 | static DEFINE_RAW_SPINLOCK(tracing_start_lock); |
| 345 | 345 | ||
| 346 | static void wakeup_work_handler(struct work_struct *work) | 346 | static void wakeup_work_handler(struct work_struct *work) |
| 347 | { | 347 | { |
| @@ -960,7 +960,7 @@ void tracing_start(void) | |||
| 960 | if (tracing_disabled) | 960 | if (tracing_disabled) |
| 961 | return; | 961 | return; |
| 962 | 962 | ||
| 963 | spin_lock_irqsave(&tracing_start_lock, flags); | 963 | raw_spin_lock_irqsave(&tracing_start_lock, flags); |
| 964 | if (--trace_stop_count) { | 964 | if (--trace_stop_count) { |
| 965 | if (trace_stop_count < 0) { | 965 | if (trace_stop_count < 0) { |
| 966 | /* Someone screwed up their debugging */ | 966 | /* Someone screwed up their debugging */ |
| @@ -985,7 +985,7 @@ void tracing_start(void) | |||
| 985 | 985 | ||
| 986 | ftrace_start(); | 986 | ftrace_start(); |
| 987 | out: | 987 | out: |
| 988 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 988 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); |
| 989 | } | 989 | } |
| 990 | 990 | ||
| 991 | /** | 991 | /** |
| @@ -1000,7 +1000,7 @@ void tracing_stop(void) | |||
| 1000 | unsigned long flags; | 1000 | unsigned long flags; |
| 1001 | 1001 | ||
| 1002 | ftrace_stop(); | 1002 | ftrace_stop(); |
| 1003 | spin_lock_irqsave(&tracing_start_lock, flags); | 1003 | raw_spin_lock_irqsave(&tracing_start_lock, flags); |
| 1004 | if (trace_stop_count++) | 1004 | if (trace_stop_count++) |
| 1005 | goto out; | 1005 | goto out; |
| 1006 | 1006 | ||
| @@ -1018,7 +1018,7 @@ void tracing_stop(void) | |||
| 1018 | arch_spin_unlock(&ftrace_max_lock); | 1018 | arch_spin_unlock(&ftrace_max_lock); |
| 1019 | 1019 | ||
| 1020 | out: | 1020 | out: |
| 1021 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 1021 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); |
| 1022 | } | 1022 | } |
| 1023 | 1023 | ||
| 1024 | void trace_stop_cmdline_recording(void); | 1024 | void trace_stop_cmdline_recording(void); |
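tracing_stop() and tracing_start() nest by counting under tracing_start_lock: only the first stop (and the matching final start) actually touches the ring buffers, and tracing_start() warns if the counter would go negative. The counting idiom in miniature, with hypothetical names:

	static DEFINE_RAW_SPINLOCK(state_lock);
	static int stop_count;

	void my_stop(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&state_lock, flags);
		if (stop_count++ == 0)
			disable_facility();	/* hypothetical helper */
		raw_spin_unlock_irqrestore(&state_lock, flags);
	}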
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 667aa8cc0cfc..11186212068c 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly; | |||
| 23 | 23 | ||
| 24 | static DEFINE_PER_CPU(int, tracing_cpu); | 24 | static DEFINE_PER_CPU(int, tracing_cpu); |
| 25 | 25 | ||
| 26 | static DEFINE_SPINLOCK(max_trace_lock); | 26 | static DEFINE_RAW_SPINLOCK(max_trace_lock); |
| 27 | 27 | ||
| 28 | enum { | 28 | enum { |
| 29 | TRACER_IRQS_OFF = (1 << 1), | 29 | TRACER_IRQS_OFF = (1 << 1), |
| @@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr, | |||
| 321 | if (!report_latency(delta)) | 321 | if (!report_latency(delta)) |
| 322 | goto out; | 322 | goto out; |
| 323 | 323 | ||
| 324 | spin_lock_irqsave(&max_trace_lock, flags); | 324 | raw_spin_lock_irqsave(&max_trace_lock, flags); |
| 325 | 325 | ||
| 326 | /* check if we are still the max latency */ | 326 | /* check if we are still the max latency */ |
| 327 | if (!report_latency(delta)) | 327 | if (!report_latency(delta)) |
| @@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr, | |||
| 344 | max_sequence++; | 344 | max_sequence++; |
| 345 | 345 | ||
| 346 | out_unlock: | 346 | out_unlock: |
| 347 | spin_unlock_irqrestore(&max_trace_lock, flags); | 347 | raw_spin_unlock_irqrestore(&max_trace_lock, flags); |
| 348 | 348 | ||
| 349 | out: | 349 | out: |
| 350 | data->critical_sequence = max_sequence; | 350 | data->critical_sequence = max_sequence; |
diff --git a/lib/atomic64.c b/lib/atomic64.c index e12ae0dd08a8..3975470caf4f 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c | |||
| @@ -29,11 +29,11 @@ | |||
| 29 | * Ensure each lock is in a separate cacheline. | 29 | * Ensure each lock is in a separate cacheline. |
| 30 | */ | 30 | */ |
| 31 | static union { | 31 | static union { |
| 32 | spinlock_t lock; | 32 | raw_spinlock_t lock; |
| 33 | char pad[L1_CACHE_BYTES]; | 33 | char pad[L1_CACHE_BYTES]; |
| 34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; | 34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; |
| 35 | 35 | ||
| 36 | static inline spinlock_t *lock_addr(const atomic64_t *v) | 36 | static inline raw_spinlock_t *lock_addr(const atomic64_t *v) |
| 37 | { | 37 | { |
| 38 | unsigned long addr = (unsigned long) v; | 38 | unsigned long addr = (unsigned long) v; |
| 39 | 39 | ||
| @@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v) | |||
| 45 | long long atomic64_read(const atomic64_t *v) | 45 | long long atomic64_read(const atomic64_t *v) |
| 46 | { | 46 | { |
| 47 | unsigned long flags; | 47 | unsigned long flags; |
| 48 | spinlock_t *lock = lock_addr(v); | 48 | raw_spinlock_t *lock = lock_addr(v); |
| 49 | long long val; | 49 | long long val; |
| 50 | 50 | ||
| 51 | spin_lock_irqsave(lock, flags); | 51 | raw_spin_lock_irqsave(lock, flags); |
| 52 | val = v->counter; | 52 | val = v->counter; |
| 53 | spin_unlock_irqrestore(lock, flags); | 53 | raw_spin_unlock_irqrestore(lock, flags); |
| 54 | return val; | 54 | return val; |
| 55 | } | 55 | } |
| 56 | EXPORT_SYMBOL(atomic64_read); | 56 | EXPORT_SYMBOL(atomic64_read); |
| @@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read); | |||
| 58 | void atomic64_set(atomic64_t *v, long long i) | 58 | void atomic64_set(atomic64_t *v, long long i) |
| 59 | { | 59 | { |
| 60 | unsigned long flags; | 60 | unsigned long flags; |
| 61 | spinlock_t *lock = lock_addr(v); | 61 | raw_spinlock_t *lock = lock_addr(v); |
| 62 | 62 | ||
| 63 | spin_lock_irqsave(lock, flags); | 63 | raw_spin_lock_irqsave(lock, flags); |
| 64 | v->counter = i; | 64 | v->counter = i; |
| 65 | spin_unlock_irqrestore(lock, flags); | 65 | raw_spin_unlock_irqrestore(lock, flags); |
| 66 | } | 66 | } |
| 67 | EXPORT_SYMBOL(atomic64_set); | 67 | EXPORT_SYMBOL(atomic64_set); |
| 68 | 68 | ||
| 69 | void atomic64_add(long long a, atomic64_t *v) | 69 | void atomic64_add(long long a, atomic64_t *v) |
| 70 | { | 70 | { |
| 71 | unsigned long flags; | 71 | unsigned long flags; |
| 72 | spinlock_t *lock = lock_addr(v); | 72 | raw_spinlock_t *lock = lock_addr(v); |
| 73 | 73 | ||
| 74 | spin_lock_irqsave(lock, flags); | 74 | raw_spin_lock_irqsave(lock, flags); |
| 75 | v->counter += a; | 75 | v->counter += a; |
| 76 | spin_unlock_irqrestore(lock, flags); | 76 | raw_spin_unlock_irqrestore(lock, flags); |
| 77 | } | 77 | } |
| 78 | EXPORT_SYMBOL(atomic64_add); | 78 | EXPORT_SYMBOL(atomic64_add); |
| 79 | 79 | ||
| 80 | long long atomic64_add_return(long long a, atomic64_t *v) | 80 | long long atomic64_add_return(long long a, atomic64_t *v) |
| 81 | { | 81 | { |
| 82 | unsigned long flags; | 82 | unsigned long flags; |
| 83 | spinlock_t *lock = lock_addr(v); | 83 | raw_spinlock_t *lock = lock_addr(v); |
| 84 | long long val; | 84 | long long val; |
| 85 | 85 | ||
| 86 | spin_lock_irqsave(lock, flags); | 86 | raw_spin_lock_irqsave(lock, flags); |
| 87 | val = v->counter += a; | 87 | val = v->counter += a; |
| 88 | spin_unlock_irqrestore(lock, flags); | 88 | raw_spin_unlock_irqrestore(lock, flags); |
| 89 | return val; | 89 | return val; |
| 90 | } | 90 | } |
| 91 | EXPORT_SYMBOL(atomic64_add_return); | 91 | EXPORT_SYMBOL(atomic64_add_return); |
| @@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return); | |||
| 93 | void atomic64_sub(long long a, atomic64_t *v) | 93 | void atomic64_sub(long long a, atomic64_t *v) |
| 94 | { | 94 | { |
| 95 | unsigned long flags; | 95 | unsigned long flags; |
| 96 | spinlock_t *lock = lock_addr(v); | 96 | raw_spinlock_t *lock = lock_addr(v); |
| 97 | 97 | ||
| 98 | spin_lock_irqsave(lock, flags); | 98 | raw_spin_lock_irqsave(lock, flags); |
| 99 | v->counter -= a; | 99 | v->counter -= a; |
| 100 | spin_unlock_irqrestore(lock, flags); | 100 | raw_spin_unlock_irqrestore(lock, flags); |
| 101 | } | 101 | } |
| 102 | EXPORT_SYMBOL(atomic64_sub); | 102 | EXPORT_SYMBOL(atomic64_sub); |
| 103 | 103 | ||
| 104 | long long atomic64_sub_return(long long a, atomic64_t *v) | 104 | long long atomic64_sub_return(long long a, atomic64_t *v) |
| 105 | { | 105 | { |
| 106 | unsigned long flags; | 106 | unsigned long flags; |
| 107 | spinlock_t *lock = lock_addr(v); | 107 | raw_spinlock_t *lock = lock_addr(v); |
| 108 | long long val; | 108 | long long val; |
| 109 | 109 | ||
| 110 | spin_lock_irqsave(lock, flags); | 110 | raw_spin_lock_irqsave(lock, flags); |
| 111 | val = v->counter -= a; | 111 | val = v->counter -= a; |
| 112 | spin_unlock_irqrestore(lock, flags); | 112 | raw_spin_unlock_irqrestore(lock, flags); |
| 113 | return val; | 113 | return val; |
| 114 | } | 114 | } |
| 115 | EXPORT_SYMBOL(atomic64_sub_return); | 115 | EXPORT_SYMBOL(atomic64_sub_return); |
| @@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return); | |||
| 117 | long long atomic64_dec_if_positive(atomic64_t *v) | 117 | long long atomic64_dec_if_positive(atomic64_t *v) |
| 118 | { | 118 | { |
| 119 | unsigned long flags; | 119 | unsigned long flags; |
| 120 | spinlock_t *lock = lock_addr(v); | 120 | raw_spinlock_t *lock = lock_addr(v); |
| 121 | long long val; | 121 | long long val; |
| 122 | 122 | ||
| 123 | spin_lock_irqsave(lock, flags); | 123 | raw_spin_lock_irqsave(lock, flags); |
| 124 | val = v->counter - 1; | 124 | val = v->counter - 1; |
| 125 | if (val >= 0) | 125 | if (val >= 0) |
| 126 | v->counter = val; | 126 | v->counter = val; |
| 127 | spin_unlock_irqrestore(lock, flags); | 127 | raw_spin_unlock_irqrestore(lock, flags); |
| 128 | return val; | 128 | return val; |
| 129 | } | 129 | } |
| 130 | EXPORT_SYMBOL(atomic64_dec_if_positive); | 130 | EXPORT_SYMBOL(atomic64_dec_if_positive); |
| @@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive); | |||
| 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) |
| 133 | { | 133 | { |
| 134 | unsigned long flags; | 134 | unsigned long flags; |
| 135 | spinlock_t *lock = lock_addr(v); | 135 | raw_spinlock_t *lock = lock_addr(v); |
| 136 | long long val; | 136 | long long val; |
| 137 | 137 | ||
| 138 | spin_lock_irqsave(lock, flags); | 138 | raw_spin_lock_irqsave(lock, flags); |
| 139 | val = v->counter; | 139 | val = v->counter; |
| 140 | if (val == o) | 140 | if (val == o) |
| 141 | v->counter = n; | 141 | v->counter = n; |
| 142 | spin_unlock_irqrestore(lock, flags); | 142 | raw_spin_unlock_irqrestore(lock, flags); |
| 143 | return val; | 143 | return val; |
| 144 | } | 144 | } |
| 145 | EXPORT_SYMBOL(atomic64_cmpxchg); | 145 | EXPORT_SYMBOL(atomic64_cmpxchg); |
| @@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg); | |||
| 147 | long long atomic64_xchg(atomic64_t *v, long long new) | 147 | long long atomic64_xchg(atomic64_t *v, long long new) |
| 148 | { | 148 | { |
| 149 | unsigned long flags; | 149 | unsigned long flags; |
| 150 | spinlock_t *lock = lock_addr(v); | 150 | raw_spinlock_t *lock = lock_addr(v); |
| 151 | long long val; | 151 | long long val; |
| 152 | 152 | ||
| 153 | spin_lock_irqsave(lock, flags); | 153 | raw_spin_lock_irqsave(lock, flags); |
| 154 | val = v->counter; | 154 | val = v->counter; |
| 155 | v->counter = new; | 155 | v->counter = new; |
| 156 | spin_unlock_irqrestore(lock, flags); | 156 | raw_spin_unlock_irqrestore(lock, flags); |
| 157 | return val; | 157 | return val; |
| 158 | } | 158 | } |
| 159 | EXPORT_SYMBOL(atomic64_xchg); | 159 | EXPORT_SYMBOL(atomic64_xchg); |
| @@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg); | |||
| 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) | 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) |
| 162 | { | 162 | { |
| 163 | unsigned long flags; | 163 | unsigned long flags; |
| 164 | spinlock_t *lock = lock_addr(v); | 164 | raw_spinlock_t *lock = lock_addr(v); |
| 165 | int ret = 0; | 165 | int ret = 0; |
| 166 | 166 | ||
| 167 | spin_lock_irqsave(lock, flags); | 167 | raw_spin_lock_irqsave(lock, flags); |
| 168 | if (v->counter != u) { | 168 | if (v->counter != u) { |
| 169 | v->counter += a; | 169 | v->counter += a; |
| 170 | ret = 1; | 170 | ret = 1; |
| 171 | } | 171 | } |
| 172 | spin_unlock_irqrestore(lock, flags); | 172 | raw_spin_unlock_irqrestore(lock, flags); |
| 173 | return ret; | 173 | return ret; |
| 174 | } | 174 | } |
| 175 | EXPORT_SYMBOL(atomic64_add_unless); | 175 | EXPORT_SYMBOL(atomic64_add_unless); |
| @@ -179,7 +179,7 @@ static int init_atomic64_lock(void) | |||
| 179 | int i; | 179 | int i; |
| 180 | 180 | ||
| 181 | for (i = 0; i < NR_LOCKS; ++i) | 181 | for (i = 0; i < NR_LOCKS; ++i) |
| 182 | spin_lock_init(&atomic64_lock[i].lock); | 182 | raw_spin_lock_init(&atomic64_lock[i].lock); |
| 183 | return 0; | 183 | return 0; |
| 184 | } | 184 | } |
| 185 | 185 | ||
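Note: on PREEMPT_RT a spinlock_t becomes a sleeping rt_mutex, but the generic atomic64 fallback must work from any context, including with interrupts disabled, so its hashed lock array is annotated raw (a raw_spinlock_t always busy-waits; on !PREEMPT_RT the two types behave identically, so this is not a functional change there). A rough userspace analogue of the hashed-lock technique, with pthread spin locks standing in for raw spinlocks (an illustrative sketch, not the kernel code):

/*
 * Userspace sketch of lib/atomic64.c's approach: a fixed array of
 * spinlocks, selected by hashing the counter's address, so unrelated
 * counters rarely contend on the same lock. Compile with -lpthread.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 16

static pthread_spinlock_t locks[NR_LOCKS];

typedef struct { long long counter; } atomic64_t;

static pthread_spinlock_t *lock_addr(const atomic64_t *v)
{
	uintptr_t addr = (uintptr_t)v;

	/* Drop the low bits (same cache line) before hashing. */
	return &locks[(addr >> 5) % NR_LOCKS];
}

static long long atomic64_add_return(long long a, atomic64_t *v)
{
	pthread_spinlock_t *lock = lock_addr(v);
	long long val;

	pthread_spin_lock(lock);
	val = v->counter += a;
	pthread_spin_unlock(lock);
	return val;
}

int main(void)
{
	atomic64_t v = { 0 };
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);

	printf("%lld\n", atomic64_add_return(5, &v));	/* prints 5 */
	return 0;
}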
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
| @@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
| 59 | { | 59 | { |
| 60 | int cpu; | 60 | int cpu; |
| 61 | 61 | ||
| 62 | spin_lock(&fbc->lock); | 62 | raw_spin_lock(&fbc->lock); |
| 63 | for_each_possible_cpu(cpu) { | 63 | for_each_possible_cpu(cpu) { |
| 64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 65 | *pcount = 0; | 65 | *pcount = 0; |
| 66 | } | 66 | } |
| 67 | fbc->count = amount; | 67 | fbc->count = amount; |
| 68 | spin_unlock(&fbc->lock); | 68 | raw_spin_unlock(&fbc->lock); |
| 69 | } | 69 | } |
| 70 | EXPORT_SYMBOL(percpu_counter_set); | 70 | EXPORT_SYMBOL(percpu_counter_set); |
| 71 | 71 | ||
| @@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
| 76 | preempt_disable(); | 76 | preempt_disable(); |
| 77 | count = __this_cpu_read(*fbc->counters) + amount; | 77 | count = __this_cpu_read(*fbc->counters) + amount; |
| 78 | if (count >= batch || count <= -batch) { | 78 | if (count >= batch || count <= -batch) { |
| 79 | spin_lock(&fbc->lock); | 79 | raw_spin_lock(&fbc->lock); |
| 80 | fbc->count += count; | 80 | fbc->count += count; |
| 81 | __this_cpu_write(*fbc->counters, 0); | 81 | __this_cpu_write(*fbc->counters, 0); |
| 82 | spin_unlock(&fbc->lock); | 82 | raw_spin_unlock(&fbc->lock); |
| 83 | } else { | 83 | } else { |
| 84 | __this_cpu_write(*fbc->counters, count); | 84 | __this_cpu_write(*fbc->counters, count); |
| 85 | } | 85 | } |
| @@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) | |||
| 96 | s64 ret; | 96 | s64 ret; |
| 97 | int cpu; | 97 | int cpu; |
| 98 | 98 | ||
| 99 | spin_lock(&fbc->lock); | 99 | raw_spin_lock(&fbc->lock); |
| 100 | ret = fbc->count; | 100 | ret = fbc->count; |
| 101 | for_each_online_cpu(cpu) { | 101 | for_each_online_cpu(cpu) { |
| 102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 103 | ret += *pcount; | 103 | ret += *pcount; |
| 104 | } | 104 | } |
| 105 | spin_unlock(&fbc->lock); | 105 | raw_spin_unlock(&fbc->lock); |
| 106 | return ret; | 106 | return ret; |
| 107 | } | 107 | } |
| 108 | EXPORT_SYMBOL(__percpu_counter_sum); | 108 | EXPORT_SYMBOL(__percpu_counter_sum); |
| @@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum); | |||
| 110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | 110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, |
| 111 | struct lock_class_key *key) | 111 | struct lock_class_key *key) |
| 112 | { | 112 | { |
| 113 | spin_lock_init(&fbc->lock); | 113 | raw_spin_lock_init(&fbc->lock); |
| 114 | lockdep_set_class(&fbc->lock, key); | 114 | lockdep_set_class(&fbc->lock, key); |
| 115 | fbc->count = amount; | 115 | fbc->count = amount; |
| 116 | fbc->counters = alloc_percpu(s32); | 116 | fbc->counters = alloc_percpu(s32); |
| @@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
| 173 | s32 *pcount; | 173 | s32 *pcount; |
| 174 | unsigned long flags; | 174 | unsigned long flags; |
| 175 | 175 | ||
| 176 | spin_lock_irqsave(&fbc->lock, flags); | 176 | raw_spin_lock_irqsave(&fbc->lock, flags); |
| 177 | pcount = per_cpu_ptr(fbc->counters, cpu); | 177 | pcount = per_cpu_ptr(fbc->counters, cpu); |
| 178 | fbc->count += *pcount; | 178 | fbc->count += *pcount; |
| 179 | *pcount = 0; | 179 | *pcount = 0; |
| 180 | spin_unlock_irqrestore(&fbc->lock, flags); | 180 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 181 | } | 181 | } |
| 182 | mutex_unlock(&percpu_counters_lock); | 182 | mutex_unlock(&percpu_counters_lock); |
| 183 | #endif | 183 | #endif |
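Note: fbc->lock only guards folding a per-CPU delta into the shared count and the summation loop, both short and bounded, and the hotplug callback takes it with interrupts disabled, so it can safely be raw. A userspace sketch of the batching idea itself, where thread-local deltas are folded into a shared total once they cross a batch threshold (counter_add and BATCH are invented names for this example):

/*
 * Each thread accumulates into a private counter and only takes the
 * shared lock when the local value exceeds the batch threshold.
 * Compile with -lpthread; __thread is a GCC/Clang extension.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH 32

static pthread_spinlock_t lock;
static long long global_count;
static __thread long local_count;

static void counter_add(long amount)
{
	long count = local_count + amount;

	if (count >= BATCH || count <= -BATCH) {
		/* Fold the local delta into the shared count. */
		pthread_spin_lock(&lock);
		global_count += count;
		pthread_spin_unlock(&lock);
		local_count = 0;
	} else {
		local_count = count;
	}
}

int main(void)
{
	int i;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < 100; i++)
		counter_add(1);
	printf("global=%lld local=%ld\n", global_count, local_count);
	return 0;	/* prints global=96 local=4 */
}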
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
| @@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) | |||
| 190 | 190 | ||
| 191 | int prop_local_init_percpu(struct prop_local_percpu *pl) | 191 | int prop_local_init_percpu(struct prop_local_percpu *pl) |
| 192 | { | 192 | { |
| 193 | spin_lock_init(&pl->lock); | 193 | raw_spin_lock_init(&pl->lock); |
| 194 | pl->shift = 0; | 194 | pl->shift = 0; |
| 195 | pl->period = 0; | 195 | pl->period = 0; |
| 196 | return percpu_counter_init(&pl->events, 0); | 196 | return percpu_counter_init(&pl->events, 0); |
| @@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
| 226 | if (pl->period == global_period) | 226 | if (pl->period == global_period) |
| 227 | return; | 227 | return; |
| 228 | 228 | ||
| 229 | spin_lock_irqsave(&pl->lock, flags); | 229 | raw_spin_lock_irqsave(&pl->lock, flags); |
| 230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
| 231 | 231 | ||
| 232 | /* | 232 | /* |
| @@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
| 247 | percpu_counter_set(&pl->events, 0); | 247 | percpu_counter_set(&pl->events, 0); |
| 248 | 248 | ||
| 249 | pl->period = global_period; | 249 | pl->period = global_period; |
| 250 | spin_unlock_irqrestore(&pl->lock, flags); | 250 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | /* | 253 | /* |
| @@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd, | |||
| 324 | 324 | ||
| 325 | int prop_local_init_single(struct prop_local_single *pl) | 325 | int prop_local_init_single(struct prop_local_single *pl) |
| 326 | { | 326 | { |
| 327 | spin_lock_init(&pl->lock); | 327 | raw_spin_lock_init(&pl->lock); |
| 328 | pl->shift = 0; | 328 | pl->shift = 0; |
| 329 | pl->period = 0; | 329 | pl->period = 0; |
| 330 | pl->events = 0; | 330 | pl->events = 0; |
| @@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
| 356 | if (pl->period == global_period) | 356 | if (pl->period == global_period) |
| 357 | return; | 357 | return; |
| 358 | 358 | ||
| 359 | spin_lock_irqsave(&pl->lock, flags); | 359 | raw_spin_lock_irqsave(&pl->lock, flags); |
| 360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
| 361 | /* | 361 | /* |
| 362 | * For each missed period, we half the local counter. | 362 | * For each missed period, we half the local counter. |
| @@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
| 367 | else | 367 | else |
| 368 | pl->events = 0; | 368 | pl->events = 0; |
| 369 | pl->period = global_period; | 369 | pl->period = global_period; |
| 370 | spin_unlock_irqrestore(&pl->lock, flags); | 370 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
| 371 | } | 371 | } |
| 372 | 372 | ||
| 373 | /* | 373 | /* |
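Note: pl->lock is taken from writeback accounting paths with interrupts disabled and only covers a bounded renormalization, hence the raw annotation. The renormalization rule is simple: for each whole period the local counter missed, its event count is halved. A standalone sketch of that decay step (decay_events is an invented name; the shift-width guard is an assumption mirroring the usual BITS_PER_LONG check):

/*
 * Exponentially decay stale contributions: halve the event count
 * once per missed period.
 */
#include <stdio.h>

static unsigned long decay_events(unsigned long events,
				  unsigned long missed_periods)
{
	/* Shifting by >= the word size is undefined; treat as full decay. */
	if (missed_periods >= sizeof(events) * 8)
		return 0;
	return events >> missed_periods;
}

int main(void)
{
	printf("%lu\n", decay_events(1024, 3));	/* 1024/8 = 128 */
	return 0;
}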
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56d..c96d500577de 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
| @@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
| 39 | * in addition to the one that will be printed by | 39 | * in addition to the one that will be printed by |
| 40 | * the entity that is holding the lock already: | 40 | * the entity that is holding the lock already: |
| 41 | */ | 41 | */ |
| 42 | if (!spin_trylock_irqsave(&rs->lock, flags)) | 42 | if (!raw_spin_trylock_irqsave(&rs->lock, flags)) |
| 43 | return 0; | 43 | return 0; |
| 44 | 44 | ||
| 45 | if (!rs->begin) | 45 | if (!rs->begin) |
| @@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
| 60 | rs->missed++; | 60 | rs->missed++; |
| 61 | ret = 0; | 61 | ret = 0; |
| 62 | } | 62 | } |
| 63 | spin_unlock_irqrestore(&rs->lock, flags); | 63 | raw_spin_unlock_irqrestore(&rs->lock, flags); |
| 64 | 64 | ||
| 65 | return ret; | 65 | return ret; |
| 66 | } | 66 | } |
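Note: as the comment retained above says, ___ratelimit can be reached by the very context that already holds rs->lock, so it only ever trylocks; since it is also called from arbitrary atomic contexts, the lock has to stay a true spinning lock on RT. A userspace sketch of the same trylock-or-suppress pattern (struct ratelimit and ratelimit_ok are invented names here; pthread_spin_trylock returns non-zero when the lock is busy):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	pthread_spinlock_t lock;
	time_t begin;		/* start of the current interval */
	int interval;		/* interval length in seconds */
	int burst;		/* events allowed per interval */
	int printed;
	int missed;
};

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);
	int ret;

	if (pthread_spin_trylock(&rs->lock))
		return 0;	/* contended: suppress rather than deadlock */

	if (!rs->begin || now - rs->begin >= rs->interval) {
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		ret = 1;
	} else {
		rs->missed++;
		ret = 0;
	}
	pthread_spin_unlock(&rs->lock);
	return ret;
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 3 };
	int i;

	pthread_spin_init(&rs.lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < 5; i++)
		printf("event %d: %s\n", i,
		       ratelimit_ok(&rs) ? "printed" : "suppressed");
	return 0;
}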
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b05..f2393c21fe85 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
| @@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem) | |||
| 22 | int ret = 1; | 22 | int ret = 1; |
| 23 | unsigned long flags; | 23 | unsigned long flags; |
| 24 | 24 | ||
| 25 | if (spin_trylock_irqsave(&sem->wait_lock, flags)) { | 25 | if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
| 26 | ret = (sem->activity != 0); | 26 | ret = (sem->activity != 0); |
| 27 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 27 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 28 | } | 28 | } |
| 29 | return ret; | 29 | return ret; |
| 30 | } | 30 | } |
| @@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
| 44 | lockdep_init_map(&sem->dep_map, name, key, 0); | 44 | lockdep_init_map(&sem->dep_map, name, key, 0); |
| 45 | #endif | 45 | #endif |
| 46 | sem->activity = 0; | 46 | sem->activity = 0; |
| 47 | spin_lock_init(&sem->wait_lock); | 47 | raw_spin_lock_init(&sem->wait_lock); |
| 48 | INIT_LIST_HEAD(&sem->wait_list); | 48 | INIT_LIST_HEAD(&sem->wait_list); |
| 49 | } | 49 | } |
| 50 | EXPORT_SYMBOL(__init_rwsem); | 50 | EXPORT_SYMBOL(__init_rwsem); |
| @@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 145 | struct task_struct *tsk; | 145 | struct task_struct *tsk; |
| 146 | unsigned long flags; | 146 | unsigned long flags; |
| 147 | 147 | ||
| 148 | spin_lock_irqsave(&sem->wait_lock, flags); | 148 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 149 | 149 | ||
| 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
| 151 | /* granted */ | 151 | /* granted */ |
| 152 | sem->activity++; | 152 | sem->activity++; |
| 153 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 153 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 154 | goto out; | 154 | goto out; |
| 155 | } | 155 | } |
| 156 | 156 | ||
| @@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 165 | list_add_tail(&waiter.list, &sem->wait_list); | 165 | list_add_tail(&waiter.list, &sem->wait_list); |
| 166 | 166 | ||
| 167 | /* we don't need to touch the semaphore struct anymore */ | 167 | /* we don't need to touch the semaphore struct anymore */ |
| 168 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 168 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 169 | 169 | ||
| 170 | /* wait to be given the lock */ | 170 | /* wait to be given the lock */ |
| 171 | for (;;) { | 171 | for (;;) { |
| @@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
| 189 | int ret = 0; | 189 | int ret = 0; |
| 190 | 190 | ||
| 191 | 191 | ||
| 192 | spin_lock_irqsave(&sem->wait_lock, flags); | 192 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 193 | 193 | ||
| 194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
| 195 | /* granted */ | 195 | /* granted */ |
| @@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
| 197 | ret = 1; | 197 | ret = 1; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 200 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 201 | 201 | ||
| 202 | return ret; | 202 | return ret; |
| 203 | } | 203 | } |
| @@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 212 | struct task_struct *tsk; | 212 | struct task_struct *tsk; |
| 213 | unsigned long flags; | 213 | unsigned long flags; |
| 214 | 214 | ||
| 215 | spin_lock_irqsave(&sem->wait_lock, flags); | 215 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 216 | 216 | ||
| 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
| 218 | /* granted */ | 218 | /* granted */ |
| 219 | sem->activity = -1; | 219 | sem->activity = -1; |
| 220 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 221 | goto out; | 221 | goto out; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| @@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 232 | list_add_tail(&waiter.list, &sem->wait_list); | 232 | list_add_tail(&waiter.list, &sem->wait_list); |
| 233 | 233 | ||
| 234 | /* we don't need to touch the semaphore struct anymore */ | 234 | /* we don't need to touch the semaphore struct anymore */ |
| 235 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 235 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 236 | 236 | ||
| 237 | /* wait to be given the lock */ | 237 | /* wait to be given the lock */ |
| 238 | for (;;) { | 238 | for (;;) { |
| @@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
| 260 | unsigned long flags; | 260 | unsigned long flags; |
| 261 | int ret = 0; | 261 | int ret = 0; |
| 262 | 262 | ||
| 263 | spin_lock_irqsave(&sem->wait_lock, flags); | 263 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 264 | 264 | ||
| 265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
| 266 | /* granted */ | 266 | /* granted */ |
| @@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
| 268 | ret = 1; | 268 | ret = 1; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 271 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 272 | 272 | ||
| 273 | return ret; | 273 | return ret; |
| 274 | } | 274 | } |
| @@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem) | |||
| 280 | { | 280 | { |
| 281 | unsigned long flags; | 281 | unsigned long flags; |
| 282 | 282 | ||
| 283 | spin_lock_irqsave(&sem->wait_lock, flags); | 283 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 284 | 284 | ||
| 285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | 285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) |
| 286 | sem = __rwsem_wake_one_writer(sem); | 286 | sem = __rwsem_wake_one_writer(sem); |
| 287 | 287 | ||
| 288 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 288 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | /* | 291 | /* |
| @@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem) | |||
| 295 | { | 295 | { |
| 296 | unsigned long flags; | 296 | unsigned long flags; |
| 297 | 297 | ||
| 298 | spin_lock_irqsave(&sem->wait_lock, flags); | 298 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 299 | 299 | ||
| 300 | sem->activity = 0; | 300 | sem->activity = 0; |
| 301 | if (!list_empty(&sem->wait_list)) | 301 | if (!list_empty(&sem->wait_list)) |
| 302 | sem = __rwsem_do_wake(sem, 1); | 302 | sem = __rwsem_do_wake(sem, 1); |
| 303 | 303 | ||
| 304 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 304 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | /* | 307 | /* |
| @@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem) | |||
| 312 | { | 312 | { |
| 313 | unsigned long flags; | 313 | unsigned long flags; |
| 314 | 314 | ||
| 315 | spin_lock_irqsave(&sem->wait_lock, flags); | 315 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 316 | 316 | ||
| 317 | sem->activity = 1; | 317 | sem->activity = 1; |
| 318 | if (!list_empty(&sem->wait_list)) | 318 | if (!list_empty(&sem->wait_list)) |
| 319 | sem = __rwsem_do_wake(sem, 0); | 319 | sem = __rwsem_do_wake(sem, 0); |
| 320 | 320 | ||
| 321 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 321 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 322 | } | 322 | } |
| 323 | 323 | ||
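Note: in this spinlock-based rwsem implementation, wait_lock never covers a sleeping operation: it guards only the activity count (positive = that many readers, -1 = a writer) and the waiter list, and is always dropped before the task blocks. That is what lets a raw lock live inside an otherwise sleeping primitive. A compressed userspace analogue, with a mutex/condvar pair in place of the raw wait_lock and explicit waiter list (it drops the kernel version's FIFO fairness; illustrative only):

#include <pthread.h>
#include <stdio.h>

struct rwsem {
	pthread_mutex_t wait_lock;	/* guards 'activity' only */
	pthread_cond_t wait;
	int activity;			/* >0: readers, -1: writer, 0: free */
};

static struct rwsem sem = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void down_read(void)
{
	pthread_mutex_lock(&sem.wait_lock);
	while (sem.activity < 0)	/* writer active: wait */
		pthread_cond_wait(&sem.wait, &sem.wait_lock);
	sem.activity++;
	pthread_mutex_unlock(&sem.wait_lock);
}

static void up_read(void)
{
	pthread_mutex_lock(&sem.wait_lock);
	if (--sem.activity == 0)
		pthread_cond_broadcast(&sem.wait);
	pthread_mutex_unlock(&sem.wait_lock);
}

static void down_write(void)
{
	pthread_mutex_lock(&sem.wait_lock);
	while (sem.activity != 0)	/* any holder: wait */
		pthread_cond_wait(&sem.wait, &sem.wait_lock);
	sem.activity = -1;
	pthread_mutex_unlock(&sem.wait_lock);
}

static void up_write(void)
{
	pthread_mutex_lock(&sem.wait_lock);
	sem.activity = 0;
	pthread_cond_broadcast(&sem.wait);
	pthread_mutex_unlock(&sem.wait_lock);
}

int main(void)
{
	down_read();
	up_read();
	down_write();
	up_write();
	printf("final activity: %d\n", sem.activity);	/* 0 */
	return 0;
}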
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261f..410aa1189b13 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
| @@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
| 22 | lockdep_init_map(&sem->dep_map, name, key, 0); | 22 | lockdep_init_map(&sem->dep_map, name, key, 0); |
| 23 | #endif | 23 | #endif |
| 24 | sem->count = RWSEM_UNLOCKED_VALUE; | 24 | sem->count = RWSEM_UNLOCKED_VALUE; |
| 25 | spin_lock_init(&sem->wait_lock); | 25 | raw_spin_lock_init(&sem->wait_lock); |
| 26 | INIT_LIST_HEAD(&sem->wait_list); | 26 | INIT_LIST_HEAD(&sem->wait_list); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| @@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
| 180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
| 181 | 181 | ||
| 182 | /* set up my own style of waitqueue */ | 182 | /* set up my own style of waitqueue */ |
| 183 | spin_lock_irq(&sem->wait_lock); | 183 | raw_spin_lock_irq(&sem->wait_lock); |
| 184 | waiter.task = tsk; | 184 | waiter.task = tsk; |
| 185 | waiter.flags = flags; | 185 | waiter.flags = flags; |
| 186 | get_task_struct(tsk); | 186 | get_task_struct(tsk); |
| @@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
| 204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) | 204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) |
| 205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
| 206 | 206 | ||
| 207 | spin_unlock_irq(&sem->wait_lock); | 207 | raw_spin_unlock_irq(&sem->wait_lock); |
| 208 | 208 | ||
| 209 | /* wait to be given the lock */ | 209 | /* wait to be given the lock */ |
| 210 | for (;;) { | 210 | for (;;) { |
| @@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
| 245 | { | 245 | { |
| 246 | unsigned long flags; | 246 | unsigned long flags; |
| 247 | 247 | ||
| 248 | spin_lock_irqsave(&sem->wait_lock, flags); | 248 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 249 | 249 | ||
| 250 | /* do nothing if list empty */ | 250 | /* do nothing if list empty */ |
| 251 | if (!list_empty(&sem->wait_list)) | 251 | if (!list_empty(&sem->wait_list)) |
| 252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | 252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); |
| 253 | 253 | ||
| 254 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 254 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 255 | 255 | ||
| 256 | return sem; | 256 | return sem; |
| 257 | } | 257 | } |
| @@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
| 265 | { | 265 | { |
| 266 | unsigned long flags; | 266 | unsigned long flags; |
| 267 | 267 | ||
| 268 | spin_lock_irqsave(&sem->wait_lock, flags); | 268 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 269 | 269 | ||
| 270 | /* do nothing if list empty */ | 270 | /* do nothing if list empty */ |
| 271 | if (!list_empty(&sem->wait_list)) | 271 | if (!list_empty(&sem->wait_list)) |
| 272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
| 273 | 273 | ||
| 274 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 274 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 275 | 275 | ||
| 276 | return sem; | 276 | return sem; |
| 277 | } | 277 | } |
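Note: unlike the spinlock-based variant above, the count-based rwsem keeps its uncontended paths down to a single atomic operation on sem->count; the (now raw) wait_lock serializes only the slow-path waiter-list manipulation, again a short non-sleeping section. A toy model of the lock-free fast path in C11 atomics (a deliberately simplified encoding, not the kernel's RWSEM_*_BIAS scheme: 0 = free, >0 = reader count, -1 = writer):

#include <stdatomic.h>
#include <stdio.h>

#define WRITER (-1L)

static atomic_long count;	/* 0: free, >0: readers, -1: writer */

static int down_read_trylock(void)
{
	long c = atomic_load(&count);

	/* Succeed only while no writer holds the lock. */
	while (c >= 0)
		if (atomic_compare_exchange_weak(&count, &c, c + 1))
			return 1;
	return 0;	/* the real code would enter the slow path here */
}

static int down_write_trylock(void)
{
	long expected = 0;

	return atomic_compare_exchange_strong(&count, &expected, WRITER);
}

static void up_read(void)  { atomic_fetch_sub(&count, 1); }
static void up_write(void) { atomic_store(&count, 0); }

int main(void)
{
	printf("read:  %d\n", down_read_trylock());	/* 1 */
	printf("write: %d\n", down_write_trylock());	/* 0: reader held */
	up_read();
	printf("write: %d\n", down_write_trylock());	/* 1 */
	up_write();
	return 0;
}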
