author		Thomas Gleixner <tglx@linutronix.de>	2009-07-03 09:44:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 05:12:14 -0400
commit		bd31b85960a7fcb2d7ede216460b8da71a88411c (patch)
tree		f2ab1a1105705856c5cdfc71bcf3f7b5f897d30d /arch/arm/mm
parent		a1741e7fcbc19a67520115df480ab17012cc3d0b (diff)
locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
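For reference, the conversion applied throughout the diff below is mechanical: DEFINE_SPINLOCK()/spin_lock*() becomes DEFINE_RAW_SPINLOCK()/raw_spin_lock*(). A minimal sketch of the resulting usage follows; example_lock and example_hw_op() are hypothetical names used only for illustration and are not part of this patch:

#include <linux/spinlock.h>

/* A raw spinlock documents that the critical section must never be
 * preempted; in mainline it behaves exactly like a normal spinlock. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_hw_op(void)
{
	unsigned long flags;

	/* Disable local interrupts and take the raw lock. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... touch low level hardware registers here ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}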
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-l2x0.c	| 46
-rw-r--r--	arch/arm/mm/context.c		| 14
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	|  6
-rw-r--r--	arch/arm/mm/copypage-v6.c	| 10
-rw-r--r--	arch/arm/mm/copypage-xscale.c	|  6
5 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb511951..3255c51e3e35 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -26,7 +26,7 @@
 #define CACHE_LINE_SIZE		32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 	dsb();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __init l2x0_unlock(__u32 cache_id)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..93aac068da94 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
-	spin_lock_init(&mm->context.id_lock);
+	raw_spin_lock_init(&mm->context.id_lock);
 }
 
 static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 	 * the broadcast. This function is also called via IPI so the
 	 * mm->context.id_lock has to be IRQ-safe.
 	 */
-	spin_lock_irqsave(&mm->context.id_lock, flags);
+	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
 	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
 		/*
 		 * Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 		mm->context.id = asid;
 		cpumask_clear(mm_cpumask(mm));
 	}
-	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
 	/*
 	 * Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
-	spin_lock(&cpu_asid_lock);
+	raw_spin_lock(&cpu_asid_lock);
 #ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
 	 */
 	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		spin_unlock(&cpu_asid_lock);
+		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
 #endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
 	}
 
 	set_mm_context(mm, asid);
-	spin_unlock(&cpu_asid_lock);
+	raw_spin_unlock(&cpu_asid_lock);
 }
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b8061519ce77..7d0a8c230342 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca0097130..3d9a1552cef6 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address	(0xffff8000)
 #define to_address	(0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 * Now copy the page using the same cache colour as the
 	 * pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 
 	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 * Now clear the page using the same cache colour as
 	 * the pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd325bf..610c24ced310 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }