diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-03 09:44:46 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:12:14 -0400 |
commit | bd31b85960a7fcb2d7ede216460b8da71a88411c (patch) | |
tree | f2ab1a1105705856c5cdfc71bcf3f7b5f897d30d /arch/arm/mm/context.c | |
parent | a1741e7fcbc19a67520115df480ab17012cc3d0b (diff) |
locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.
In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r-- | arch/arm/mm/context.c | 14 |
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index b0ee9ba3cfab..93aac068da94 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
17 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
18 | 18 | ||
19 | static DEFINE_SPINLOCK(cpu_asid_lock); | 19 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | 20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; |
21 | #ifdef CONFIG_SMP | 21 | #ifdef CONFIG_SMP |
22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | 22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); |
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm); | |||
31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
32 | { | 32 | { |
33 | mm->context.id = 0; | 33 | mm->context.id = 0; |
34 | spin_lock_init(&mm->context.id_lock); | 34 | raw_spin_lock_init(&mm->context.id_lock); |
35 | } | 35 | } |
36 | 36 | ||
37 | static void flush_context(void) | 37 | static void flush_context(void) |
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid) | |||
58 | * the broadcast. This function is also called via IPI so the | 58 | * the broadcast. This function is also called via IPI so the |
59 | * mm->context.id_lock has to be IRQ-safe. | 59 | * mm->context.id_lock has to be IRQ-safe. |
60 | */ | 60 | */ |
61 | spin_lock_irqsave(&mm->context.id_lock, flags); | 61 | raw_spin_lock_irqsave(&mm->context.id_lock, flags); |
62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | 62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { |
63 | /* | 63 | /* |
64 | * Old version of ASID found. Set the new one and | 64 | * Old version of ASID found. Set the new one and |
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid) | |||
67 | mm->context.id = asid; | 67 | mm->context.id = asid; |
68 | cpumask_clear(mm_cpumask(mm)); | 68 | cpumask_clear(mm_cpumask(mm)); |
69 | } | 69 | } |
70 | spin_unlock_irqrestore(&mm->context.id_lock, flags); | 70 | raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Set the mm_cpumask(mm) bit for the current CPU. | 73 | * Set the mm_cpumask(mm) bit for the current CPU. |
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm) | |||
117 | { | 117 | { |
118 | unsigned int asid; | 118 | unsigned int asid; |
119 | 119 | ||
120 | spin_lock(&cpu_asid_lock); | 120 | raw_spin_lock(&cpu_asid_lock); |
121 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
122 | /* | 122 | /* |
123 | * Check the ASID again, in case the change was broadcast from | 123 | * Check the ASID again, in case the change was broadcast from |
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm) | |||
125 | */ | 125 | */ |
126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | 126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { |
127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | 127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
128 | spin_unlock(&cpu_asid_lock); | 128 | raw_spin_unlock(&cpu_asid_lock); |
129 | return; | 129 | return; |
130 | } | 130 | } |
131 | #endif | 131 | #endif |
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm) | |||
153 | } | 153 | } |
154 | 154 | ||
155 | set_mm_context(mm, asid); | 155 | set_mm_context(mm, asid); |
156 | spin_unlock(&cpu_asid_lock); | 156 | raw_spin_unlock(&cpu_asid_lock); |
157 | } | 157 | } |