Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h  |  13
-rw-r--r--  arch/arm/include/asm/atomic.h     |  61
-rw-r--r--  arch/arm/include/asm/cache.h      |  16
-rw-r--r--  arch/arm/include/asm/flat.h       |   3
-rw-r--r--  arch/arm/include/asm/page.h       |   7
-rw-r--r--  arch/arm/include/asm/system.h     | 176
6 files changed, 257 insertions(+), 19 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6116e4893c0a..15f8a092b700 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
 	.align	3;				\
 	.long	9999b,9001f;			\
 	.previous
+
+/*
+ * SMP data memory barrier
+ */
+	.macro	smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+	dmb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+#endif
+	.endm
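
The smp_dmb macro above gives assembly code the same barrier selection the C side uses: the dedicated dmb instruction on ARMv7, the CP15 c7/c10/5 operation on ARMv6, and nothing at all on uniprocessor builds. A minimal stand-alone C sketch of that selection logic (the my_smp_dmb name is hypothetical and the macro is simplified, not copied from the kernel's real barrier definitions):

/*
 * Hypothetical illustration: choose the barrier the same way the
 * smp_dmb assembler macro above does.  ARMv7 has a DMB instruction,
 * ARMv6 uses the CP15 "Data Memory Barrier" operation, and everything
 * else (including !CONFIG_SMP builds) needs only a compiler barrier.
 */
#if defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
#define my_smp_dmb()	__asm__ __volatile__("dmb" : : : "memory")
#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ == 6
#define my_smp_dmb()	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" \
					     : : "r" (0) : "memory")
#else
#define my_smp_dmb()	__asm__ __volatile__("" : : : "memory")
#endif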
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index ee99723b3a6c..16b52f397983 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
44 : "cc"); 44 : "cc");
45} 45}
46 46
47static inline void atomic_add(int i, atomic_t *v)
48{
49 unsigned long tmp;
50 int result;
51
52 __asm__ __volatile__("@ atomic_add\n"
53"1: ldrex %0, [%2]\n"
54" add %0, %0, %3\n"
55" strex %1, %0, [%2]\n"
56" teq %1, #0\n"
57" bne 1b"
58 : "=&r" (result), "=&r" (tmp)
59 : "r" (&v->counter), "Ir" (i)
60 : "cc");
61}
62
47static inline int atomic_add_return(int i, atomic_t *v) 63static inline int atomic_add_return(int i, atomic_t *v)
48{ 64{
49 unsigned long tmp; 65 unsigned long tmp;
50 int result; 66 int result;
51 67
68 smp_mb();
69
52 __asm__ __volatile__("@ atomic_add_return\n" 70 __asm__ __volatile__("@ atomic_add_return\n"
53"1: ldrex %0, [%2]\n" 71"1: ldrex %0, [%2]\n"
54" add %0, %0, %3\n" 72" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
59 : "r" (&v->counter), "Ir" (i) 77 : "r" (&v->counter), "Ir" (i)
60 : "cc"); 78 : "cc");
61 79
80 smp_mb();
81
62 return result; 82 return result;
63} 83}
64 84
85static inline void atomic_sub(int i, atomic_t *v)
86{
87 unsigned long tmp;
88 int result;
89
90 __asm__ __volatile__("@ atomic_sub\n"
91"1: ldrex %0, [%2]\n"
92" sub %0, %0, %3\n"
93" strex %1, %0, [%2]\n"
94" teq %1, #0\n"
95" bne 1b"
96 : "=&r" (result), "=&r" (tmp)
97 : "r" (&v->counter), "Ir" (i)
98 : "cc");
99}
100
65static inline int atomic_sub_return(int i, atomic_t *v) 101static inline int atomic_sub_return(int i, atomic_t *v)
66{ 102{
67 unsigned long tmp; 103 unsigned long tmp;
68 int result; 104 int result;
69 105
106 smp_mb();
107
70 __asm__ __volatile__("@ atomic_sub_return\n" 108 __asm__ __volatile__("@ atomic_sub_return\n"
71"1: ldrex %0, [%2]\n" 109"1: ldrex %0, [%2]\n"
72" sub %0, %0, %3\n" 110" sub %0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
77 : "r" (&v->counter), "Ir" (i) 115 : "r" (&v->counter), "Ir" (i)
78 : "cc"); 116 : "cc");
79 117
118 smp_mb();
119
80 return result; 120 return result;
81} 121}
82 122
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 	unsigned long oldval, res;
 
+	smp_mb();
+
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
 		"ldrex	%1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		    : "cc");
 	} while (res);
 
+	smp_mb();
+
 	return oldval;
 }
 
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-#define atomic_inc(v)		(void) atomic_add_return(1, v)
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
-#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)
 
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic.h>
 #endif
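
Taken together, the atomic.h hunks make every value-returning atomic (atomic_add_return(), atomic_sub_return(), atomic_cmpxchg(), and the helpers built on them) a full memory barrier on SMP, while the new void atomic_add()/atomic_sub() stay unordered, and smp_mb__before/after_atomic_{inc,dec}() become real barriers instead of compiler barriers. A hedged usage sketch of the kind of code that relies on this ordering (struct my_obj and my_obj_put() are hypothetical, not part of this patch):

#include <linux/slab.h>
#include <asm/atomic.h>

/*
 * Hypothetical reference-counted object whose final put relies on the
 * barriers implied by atomic_sub_return(): the smp_mb() before the
 * ldrex/strex loop orders all earlier stores to the object before the
 * count drops, and the smp_mb() after it orders the free against the
 * decrement.  Plain atomic_add()/atomic_sub() give no such ordering.
 */
struct my_obj {
	atomic_t refcount;
	/* ... payload ... */
};

static void my_obj_put(struct my_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))	/* full barrier implied */
		kfree(obj);
}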
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index cb7a9e97fd7e..feaa75f0013e 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -7,4 +7,20 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
 #endif
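
The first new comment explains the DMA-safety requirement; the sketch below shows the kind of driver pattern it protects. Everything here is hypothetical illustration (my_alloc_rx_buf is not from this patch) and assumes only the standard kmalloc() and streaming DMA-mapping APIs:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: a receive buffer handed to the DMA API.
 * With ARCH_KMALLOC_MINALIGN set to L1_CACHE_BYTES, this kmalloc()ed
 * buffer cannot share a cache line with unrelated data, so the cache
 * maintenance done around dma_map_single()/dma_unmap_single() cannot
 * corrupt or stale-read its neighbours.
 */
static void *my_alloc_rx_buf(struct device *dev, size_t len, dma_addr_t *dma)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}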
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 1d77e51907f6..59426a4595c9 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
 #ifndef __ARM_FLAT_H__
 #define __ARM_FLAT_H__
 
-/* An odd number of words will be pushed after this alignment, so
-   deliberately misalign the value. */
-#define	flat_stack_align(sp)	sp = (void *)(((unsigned long)(sp) - 4) | 4)
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		(flags)
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index e6eb8a67b807..7b522770f29d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,13 +202,6 @@ typedef struct page *pgtable_t;
 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-/*
- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
- */
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
 #include <asm-generic/page.h>
 
 #endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8ed53d5..d65b2f5bf41f 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif
 
+	smp_mb();
+
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
 	case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
+	smp_mb();
 
 	return ret;
 }
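
The two hunks above bracket the exchange in __xchg() with smp_mb(), so xchg() is now a full barrier on SMP. A hedged sketch of why that matters, built on a toy test-and-set flag (my_lock, my_lock_acquire and my_lock_release are hypothetical, for illustration only; real code should use the kernel's spinlocks):

#include <asm/system.h>
#include <asm/processor.h>

/*
 * Toy test-and-set lock.  The barrier before the exchange keeps the
 * acquiring CPU's later accesses from leaking ahead of the lock, and
 * the barrier after it keeps the critical section from being observed
 * before the lock is seen as taken.
 */
static unsigned long my_lock;	/* 0 = free, 1 = held */

static void my_lock_acquire(void)
{
	while (xchg(&my_lock, 1) != 0)
		cpu_relax();	/* spin until the current owner releases */
}

static void my_lock_release(void)
{
	xchg(&my_lock, 0);	/* full barrier orders the critical section */
}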
@@ -316,6 +319,12 @@ extern void enable_hlt(void);
 
 #include <asm-generic/cmpxchg-local.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
+#else	/* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long oldval, res;
+
+	switch (size) {
+#ifdef CONFIG_CPU_32v6K
+	case 1:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexb	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexbeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	case 2:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexh	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexheq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+#endif /* CONFIG_CPU_32v6K */
+	case 4:
+		do {
+			asm volatile("@ __cmpxchg4\n"
+			"	ldrex	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+	}
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+					 unsigned long new, int size)
+{
+	unsigned long ret;
+
+	smp_mb();
+	ret = __cmpxchg(ptr, old, new, size);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
+					  (unsigned long)(o),		\
+					  (unsigned long)(n),		\
+					  sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+#ifndef CONFIG_CPU_32v6K
+	case 1:
+	case 2:
+		ret = __cmpxchg_local_generic(ptr, old, new, size);
+		break;
+#endif	/* !CONFIG_CPU_32v6K */
+	default:
+		ret = __cmpxchg(ptr, old, new, size);
+	}
+
+	return ret;
+}
+
+#define cmpxchg_local(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note: ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register unsigned long long oldval asm("r0");
+	register unsigned long long __old asm("r2") = old;
+	register unsigned long long __new asm("r4") = new;
+	unsigned long res;
+
+	do {
+		asm volatile(
+		"	@ __cmpxchg8\n"
+		"	ldrexd	%1, %H1, [%2]\n"
+		"	mov	%0, #0\n"
+		"	teq	%1, %3\n"
+		"	teqeq	%H1, %H3\n"
+		"	strexdeq %0, %4, %H4, [%2]\n"
+			: "=&r" (res), "=&r" (oldval)
+			: "r" (ptr), "Ir" (__old), "r" (__new)
+			: "memory", "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg64(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					    (unsigned long long)(o),	\
+					    (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					 (unsigned long long)(o),	\
+					 (unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
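
On ARMv6 and later this hunk replaces the generic cmpxchg fallback with ldrex/strex-based implementations: cmpxchg() wraps __cmpxchg() in an smp_mb() pair, cmpxchg_local() skips the barriers, byte and halfword operands need the v6K exclusive variants, and the 64-bit forms use ldrexd/strexd only when CONFIG_CPU_32v6K is set. A hedged usage sketch of the barrier-ful cmpxchg() (my_max_seen and my_record_max are hypothetical, not part of this patch):

#include <asm/system.h>

/*
 * Lock-free "record the maximum" update.  Each retry re-reads the
 * current value; cmpxchg() stores the candidate only if the word still
 * holds what we read, and its implied smp_mb() pair orders the update
 * against surrounding accesses.
 */
static unsigned long my_max_seen;

static void my_record_max(unsigned long val)
{
	unsigned long old;

	do {
		old = my_max_seen;
		if (old >= val)
			return;		/* nothing to update */
	} while (cmpxchg(&my_max_seen, old, val) != old);
}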