Diffstat (limited to 'arch/powerpc/include/asm/barrier.h')
 -rw-r--r--  arch/powerpc/include/asm/barrier.h | 68
 1 file changed, 68 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 000000000000..ae782254e731
--- /dev/null
+++ b/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ *	across this point (nop on PPC).
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores.  Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends()  do { } while(0)
+
+#define set_mb(var, value)	do { var = value; mb(); } while (0)
+
+#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define SMPWMB      LWSYNC
+#else
+#    define SMPWMB      eieio
+#endif
+
+#define smp_mb()	mb()
+#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while(0)
+#endif /* CONFIG_SMP */
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known.  For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x)	\
+	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+#endif /* _ASM_POWERPC_BARRIER_H */
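
Usage note (not part of the commit): the smp_wmb()/smp_rmb() pairing the header comment describes is the classic publish-through-a-flag pattern. The sketch below is illustrative only; the producer/consumer functions and the data/flag variables are hypothetical, the barrier macros are standalone copies of the lwsync-based definitions above so the example compiles on its own, and the inline assembly assumes GCC targeting powerpc.

/* Illustrative sketch only -- mirrors the lwsync-based barriers above
 * so the example is self-contained on a powerpc compiler. */
#define smp_wmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")

static volatile int flag;	/* publication flag; volatile so the spin loop re-reads it */
static int data;		/* payload handed from producer to consumer */

void producer(void)		/* runs on CPU 0 */
{
	data = 42;
	smp_wmb();		/* order the data store before the flag store */
	flag = 1;
}

int consumer(void)		/* runs on CPU 1 */
{
	while (!flag)
		;		/* spin until the producer publishes */
	smp_rmb();		/* order the flag load before the data load */
	return data;		/* guaranteed to observe 42, not a stale value */
}

lwsync suffices on both sides here because the producer only needs store->store ordering and the consumer only needs load->load ordering. smp_mb() still has to be a full sync, since lwsync does not order a store against a later load; and on CPUs where LWSYNC decays into a heavy-weight sync, the header falls back to eieio for smp_wmb(), which is still enough to order the two cacheable stores.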