author     Peter Zijlstra <peterz@infradead.org>      2013-11-06 08:57:36 -0500
committer  Ingo Molnar <mingo@kernel.org>             2014-01-12 04:37:15 -0500
commit     93ea02bb84354370e51de803a9405f171f3edf88 (patch)
tree       dc7c7719fdfe0aed36eae3c2b374ced951e62a4c /arch
parent     1de7da377bd880ff23917f78924d0e908329d978 (diff)
arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
We're going to be adding a few new barrier primitives, and in order to
avoid endless duplication make more aggressive use of
asm-generic/barrier.h. Change asm-generic/barrier.h so that it allows
partial barrier definitions and fills out the rest with defaults.

There are a few architectures (m32r, m68k) that could probably do away
with their barrier.h file entirely but are kept for now due to their
unconventional nop() implementation.

Suggested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20131213150640.846368594@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
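The whole cleanup rests on one simple mechanism: asm-generic/barrier.h wraps
every definition in an #ifndef guard, so an architecture header defines only
the primitives it genuinely implements, includes the generic header last, and
inherits a sane default for everything it left out. A minimal sketch of that
pattern in C (illustrative only, not the exact contents of the upstream header):

/*
 * Sketch of the asm-generic/barrier.h idea: every macro is guarded, so
 * whatever the architecture already defined is kept and only the
 * missing primitives fall back to a default.  barrier() is the plain
 * compiler barrier from <linux/compiler.h>.
 */
#ifndef nop
#define nop()	asm volatile ("nop")
#endif

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef read_barrier_depends
#define read_barrier_depends()	do { } while (0)
#endif

#ifdef CONFIG_SMP
#ifndef smp_mb
#define smp_mb()	mb()
#endif
#ifndef smp_wmb
#define smp_wmb()	wmb()
#endif
#else	/* !CONFIG_SMP */
#ifndef smp_mb
#define smp_mb()	barrier()
#endif
#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif
#endif	/* CONFIG_SMP */

#ifndef set_mb
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#endif

An architecture like alpha then only spells out its hardware mb()/rmb()/wmb()
and ends with #include <asm-generic/barrier.h>, which is exactly the shape of
the hunks below.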
Diffstat (limited to 'arch')
-rw-r--r--   arch/alpha/include/asm/barrier.h        25
-rw-r--r--   arch/arc/include/asm/Kbuild              1
-rw-r--r--   arch/avr32/include/asm/barrier.h        17
-rw-r--r--   arch/blackfin/include/asm/barrier.h     18
-rw-r--r--   arch/cris/include/asm/Kbuild             1
-rw-r--r--   arch/cris/include/asm/barrier.h         25
-rw-r--r--   arch/frv/include/asm/barrier.h           8
-rw-r--r--   arch/hexagon/include/asm/Kbuild          1
-rw-r--r--   arch/m32r/include/asm/barrier.h         80
-rw-r--r--   arch/m68k/include/asm/barrier.h         14
-rw-r--r--   arch/microblaze/include/asm/Kbuild       1
-rw-r--r--   arch/microblaze/include/asm/barrier.h   27
-rw-r--r--   arch/mn10300/include/asm/Kbuild          1
-rw-r--r--   arch/mn10300/include/asm/barrier.h      37
-rw-r--r--   arch/parisc/include/asm/Kbuild           1
-rw-r--r--   arch/parisc/include/asm/barrier.h       35
-rw-r--r--   arch/score/include/asm/Kbuild            1
-rw-r--r--   arch/score/include/asm/barrier.h        16
-rw-r--r--   arch/sh/include/asm/barrier.h           21
-rw-r--r--   arch/sparc/include/asm/barrier_32.h     12
-rw-r--r--   arch/tile/include/asm/barrier.h         68
-rw-r--r--   arch/unicore32/include/asm/barrier.h    11
-rw-r--r--   arch/xtensa/include/asm/barrier.h        9
23 files changed, 29 insertions(+), 401 deletions(-)
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
index ce8860a0b32d..3832bdb794fe 100644
--- a/arch/alpha/include/asm/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
@@ -3,33 +3,18 @@
 
 #include <asm/compiler.h>
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
+#define mb()	__asm__ __volatile__("mb": : :"memory")
+#define rmb()	__asm__ __volatile__("mb": : :"memory")
+#define wmb()	__asm__ __volatile__("wmb": : :"memory")
 
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
 #define __ASM_SMP_MB	"\tmb\n"
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define __ASM_SMP_MB
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* __BARRIER_H */
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 5943f7f9d325..e07c786011af 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -47,3 +47,4 @@ generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
index 0961275373db..715100790fd0 100644
--- a/arch/avr32/include/asm/barrier.h
+++ b/arch/avr32/include/asm/barrier.h
@@ -8,22 +8,15 @@
 #ifndef __ASM_AVR32_BARRIER_H
 #define __ASM_AVR32_BARRIER_H
 
-#define nop()			asm volatile("nop")
-
-#define mb()			asm volatile("" : : : "memory")
-#define rmb()			mb()
-#define wmb()			asm volatile("sync 0" : : : "memory")
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(var, value)	do { var = value; mb(); } while(0)
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
+#define wmb()	asm volatile("sync 0" : : : "memory")
 
 #ifdef CONFIG_SMP
 # error "The AVR32 port does not support SMP"
-#else
-# define smp_mb()		barrier()
-# define smp_rmb()		barrier()
-# define smp_wmb()		barrier()
-# define smp_read_barrier_depends() do { } while(0)
 #endif
 
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_AVR32_BARRIER_H */
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index ebb189507dd7..19283a16ac08 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -23,26 +23,10 @@
 # define rmb()	do { barrier(); smp_check_barrier(); } while (0)
 # define wmb()	do { barrier(); smp_mark_barrier(); } while (0)
 # define read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
-#else
-# define mb()	barrier()
-# define rmb()	barrier()
-# define wmb()	barrier()
-# define read_barrier_depends()	do { } while (0)
 #endif
 
-#else /* !CONFIG_SMP */
-
-#define mb()	barrier()
-#define rmb()	barrier()
-#define wmb()	barrier()
-#define read_barrier_depends()	do { } while (0)
-
 #endif /* !CONFIG_SMP */
 
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define smp_read_barrier_depends()	read_barrier_depends()
+#include <asm-generic/barrier.h>
 
 #endif /* _BLACKFIN_BARRIER_H */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b06caf649a95..35ec2e5ca832 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -12,3 +12,4 @@ generic-y += trace_clock.h
 generic-y += vga.h
 generic-y += xor.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h
deleted file mode 100644
index 198ad7fa6b25..000000000000
--- a/arch/cris/include/asm/barrier.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __ASM_CRIS_BARRIER_H
-#define __ASM_CRIS_BARRIER_H
-
-#define nop() __asm__ __volatile__ ("nop");
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-#define mb()	barrier()
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(var, value)	do { var = value; mb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-#endif /* __ASM_CRIS_BARRIER_H */
diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h
index 06776ad9f5e9..abbef470154c 100644
--- a/arch/frv/include/asm/barrier.h
+++ b/arch/frv/include/asm/barrier.h
@@ -17,13 +17,7 @@
 #define mb()			asm volatile ("membar" : : :"memory")
 #define rmb()			asm volatile ("membar" : : :"memory")
 #define wmb()			asm volatile ("membar" : : :"memory")
-#define read_barrier_depends()	do { } while (0)
 
-#define smp_mb()			barrier()
-#define smp_rmb()			barrier()
-#define smp_wmb()			barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#define set_mb(var, value) \
-	do { var = (value); barrier(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_BARRIER_H */
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 67c3450309b7..a614ec9747a6 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -54,3 +54,4 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += xor.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621efd3f..1a40265e8d88 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
 
 #define nop()  __asm__ __volatile__ ("nop" : : )
 
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb()   barrier()
-#define rmb()  mb()
-#define wmb()  mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_M32R_BARRIER_H */
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
index 445ce22c23cb..15c5f77c1614 100644
--- a/arch/m68k/include/asm/barrier.h
+++ b/arch/m68k/include/asm/barrier.h
@@ -1,20 +1,8 @@
 #ifndef _M68K_BARRIER_H
 #define _M68K_BARRIER_H
 
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
 #define nop()		do { asm volatile ("nop"); barrier(); } while (0)
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
+#include <asm-generic/barrier.h>
 
 #endif /* _M68K_BARRIER_H */
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index ce0bbf8f5640..f77fb6630b11 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -4,3 +4,4 @@ generic-y += exec.h
 generic-y += trace_clock.h
 generic-y += syscalls.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
deleted file mode 100644
index df5be3e87044..000000000000
--- a/arch/microblaze/include/asm/barrier.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_BARRIER_H
-#define _ASM_MICROBLAZE_BARRIER_H
-
-#define nop()			asm volatile ("nop")
-
-#define smp_read_barrier_depends()	do {} while (0)
-#define read_barrier_depends()		do {} while (0)
-
-#define mb()			barrier()
-#define rmb()			mb()
-#define wmb()			mb()
-#define set_mb(var, value)	do { var = value; mb(); } while (0)
-#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
-
-#define smp_mb()		mb()
-#define smp_rmb()		rmb()
-#define smp_wmb()		wmb()
-
-#endif /* _ASM_MICROBLAZE_BARRIER_H */
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 74742dc6a3da..367ef399ddf7 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
 generic-y += exec.h
 generic-y += trace_clock.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
deleted file mode 100644
index 2bd97a5c8af7..000000000000
--- a/arch/mn10300/include/asm/barrier.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* MN10300 memory barrier definitions
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define nop()	asm volatile ("nop")
-
-#define mb()	asm volatile ("": : :"memory")
-#define rmb()	mb()
-#define wmb()	asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define set_mb(var, value)  do { xchg(&var, value); } while (0)
-#else  /* CONFIG_SMP */
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define set_mb(var, value)  do { var = value; mb(); } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends()		do {} while (0)
-#define smp_read_barrier_depends()	do {} while (0)
-
-#endif /* _ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index a603b9ebe54c..8df06d0074f4 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
 	  poll.h xor.h clkdev.h exec.h
 generic-y += trace_clock.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
deleted file mode 100644
index e77d834aa803..000000000000
--- a/arch/parisc/include/asm/barrier.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __PARISC_BARRIER_H
-#define __PARISC_BARRIER_H
-
-/*
-** This is simply the barrier() macro from linux/kernel.h but when serial.c
-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
-** hasn't yet been included yet so it fails, thus repeating the macro here.
-**
-** PA-RISC architecture allows for weakly ordered memory accesses although
-** none of the processors use it. There is a strong ordered bit that is
-** set in the O-bit of the page directory entry. Operating systems that
-** can not tolerate out of order accesses should set this bit when mapping
-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
-** the TLB O-bit is not implemented so the page directory does not need to
-** have the O-bit set when mapping pages (section 3.1). This section also
-** states that the PSW Y, Z, G, and O bits are not implemented.
-** So it looks like nothing needs to be done for parisc-linux (yet).
-** (thanks to chada for the above comment -ggg)
-**
-** The __asm__ op below simple prevents gcc/ld from reordering
-** instructions across the mb() "call".
-*/
-#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
-#define rmb()		mb()
-#define wmb()		mb()
-#define smp_mb()	mb()
-#define smp_rmb()	mb()
-#define smp_wmb()	mb()
-#define smp_read_barrier_depends()	do { } while(0)
-#define read_barrier_depends()		do { } while(0)
-
-#define set_mb(var, value)		do { var = value; mb(); } while (0)
-
-#endif /* __PARISC_BARRIER_H */
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index f3414ade77a3..ee2993b6e5d1 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += clkdev.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += preempt.h
+generic-y += barrier.h
diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h
deleted file mode 100644
index 0eacb6471e6d..000000000000
--- a/arch/score/include/asm/barrier.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_SCORE_BARRIER_H
-#define _ASM_SCORE_BARRIER_H
-
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define read_barrier_depends()		do {} while (0)
-#define smp_read_barrier_depends()	do {} while (0)
-
-#define set_mb(var, value) 		do {var = value; wmb(); } while (0)
-
-#endif /* _ASM_SCORE_BARRIER_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 72c103dae300..43715308b068 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -26,29 +26,14 @@
 #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define wmb()		mb()
 #define ctrl_barrier()	__icbi(PAGE_OFFSET)
-#define read_barrier_depends()	do { } while(0)
 #else
-#define mb()		__asm__ __volatile__ ("": : :"memory")
-#define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("": : :"memory")
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
-#define read_barrier_depends()	do { } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
 #endif
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_SH_BARRIER_H */
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index c1b76654ee76..ae69eda288f4 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,15 +1,7 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-/* XXX Change this if we ever use a PSO mode kernel. */
-#define mb()	__asm__ __volatile__ ("" : : : "memory")
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()	do { } while(0)
+#include <asm/processor.h> /* for nop() */
+#include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index a9a73da5865d..b5a05d050a8f 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -22,59 +22,6 @@
 #include <arch/spr_def.h>
 #include <asm/timex.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends()	do { } while (0)
-
 #define __sync() __insn_mf()
 
 #include <hv/syscall_public.h>
@@ -125,20 +72,7 @@ mb_incoherent(void)
 #define mb()		fast_mb()
 #define iob()		fast_iob()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#endif
-
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_TILE_BARRIER_H */
diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h
index a6620e5336b6..83d6a520f4bd 100644
--- a/arch/unicore32/include/asm/barrier.h
+++ b/arch/unicore32/include/asm/barrier.h
@@ -14,15 +14,6 @@
 #define dsb() __asm__ __volatile__ ("" : : : "memory")
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 
-#define mb()				barrier()
-#define rmb()				barrier()
-#define wmb()				barrier()
-#define smp_mb()			barrier()
-#define smp_rmb()			barrier()
-#define smp_wmb()			barrier()
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define set_mb(var, value)		do { var = value; smp_mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* __UNICORE_BARRIER_H__ */
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index ef021677d536..e1ee6b51dfc5 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -9,21 +9,14 @@
 #ifndef _XTENSA_SYSTEM_H
 #define _XTENSA_SYSTEM_H
 
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
 #define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
 #define rmb() barrier()
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
 #error smp_* not defined
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
 #endif
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _XTENSA_SYSTEM_H */