author     David Howells <dhowells@redhat.com>   2012-03-28 13:30:02 -0400
committer  David Howells <dhowells@redhat.com>   2012-03-28 13:30:02 -0400
commit     1c80f22f8d809a9a9024aad7a5bd093f078e77cf (patch)
tree       116af9c3b675d888f63910adefe1557528726736 /arch/mn10300/include/asm
parent     b81947c646bfefdf98e2fde5d7d39cbbda8525d4 (diff)
Disintegrate asm/system.h for MN10300
Disintegrate asm/system.h for MN10300.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-am33-list@redhat.com
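
Note (not part of the commit message): after the split, MN10300 code can include just the pieces it uses instead of the catch-all header, while the asm/system.h stub below keeps existing includes working for now. A minimal, illustrative mapping for a hypothetical source file, based only on the new headers added here:

    /*
     * Before: a hypothetical MN10300 file pulled everything in at once:
     *
     *         #include <asm/system.h>
     *
     * After: include only the facilities actually used (this mapping is
     * illustrative, not prescribed by the patch).
     */
    #include <asm/barrier.h>    /* nop(), mb(), rmb(), wmb(), smp_*b() */
    #include <asm/cmpxchg.h>    /* xchg(), cmpxchg() */
    #include <asm/exec.h>       /* arch_align_stack() */
    #include <asm/switch_to.h>  /* switch_to(), switch_fpu() */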
Diffstat (limited to 'arch/mn10300/include/asm')
-rw-r--r--   arch/mn10300/include/asm/atomic.h      109
-rw-r--r--   arch/mn10300/include/asm/barrier.h      37
-rw-r--r--   arch/mn10300/include/asm/cmpxchg.h     115
-rw-r--r--   arch/mn10300/include/asm/dma.h           1
-rw-r--r--   arch/mn10300/include/asm/exec.h         16
-rw-r--r--   arch/mn10300/include/asm/switch_to.h    49
-rw-r--r--   arch/mn10300/include/asm/system.h      107
7 files changed, 225 insertions, 209 deletions
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index b9a8f8461262..975e1841ca64 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -12,112 +12,7 @@
 #define _ASM_ATOMIC_H
 
 #include <asm/irqflags.h>
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-        unsigned long status;
-        unsigned long oldval;
-
-        asm volatile(
-                "1: mov %4,(_AAR,%3)    \n"
-                "   mov (_ADR,%3),%1    \n"
-                "   mov %5,(_ADR,%3)    \n"
-                "   mov (_ADR,%3),%0    \n"     /* flush */
-                "   mov (_ASR,%3),%0    \n"
-                "   or  %0,%0           \n"
-                "   bne 1b              \n"
-                : "=&r"(status), "=&r"(oldval), "=m"(*m)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
-                : "memory", "cc");
-
-        return oldval;
-}
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                      unsigned long old, unsigned long new)
-{
-        unsigned long status;
-        unsigned long oldval;
-
-        asm volatile(
-                "1: mov %4,(_AAR,%3)    \n"
-                "   mov (_ADR,%3),%1    \n"
-                "   cmp %5,%1           \n"
-                "   bne 2f              \n"
-                "   mov %6,(_ADR,%3)    \n"
-                "2: mov (_ADR,%3),%0    \n"     /* flush */
-                "   mov (_ASR,%3),%0    \n"
-                "   or  %0,%0           \n"
-                "   bne 1b              \n"
-                : "=&r"(status), "=&r"(oldval), "=m"(*m)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
-                  "r"(old), "r"(new)
-                : "memory", "cc");
-
-        return oldval;
-}
-#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-#error "No SMP atomic operation support!"
-#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-
-#else  /* CONFIG_SMP */
-
-/*
- * Emulate xchg for non-SMP MN10300
- */
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-        unsigned long oldval;
-        unsigned long flags;
-
-        flags = arch_local_cli_save();
-        oldval = *m;
-        *m = val;
-        arch_local_irq_restore(flags);
-        return oldval;
-}
-
-/*
- * Emulate cmpxchg for non-SMP MN10300
- */
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                      unsigned long old, unsigned long new)
-{
-        unsigned long oldval;
-        unsigned long flags;
-
-        flags = arch_local_cli_save();
-        oldval = *m;
-        if (oldval == old)
-                *m = new;
-        arch_local_irq_restore(flags);
-        return oldval;
-}
-
-#endif /* CONFIG_SMP */
-
-#define xchg(ptr, v)                                            \
-        ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
-                                     (unsigned long)(v)))
-
-#define cmpxchg(ptr, o, n)                                      \
-        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                        (unsigned long)(o),     \
-                                        (unsigned long)(n)))
-
-#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))
-
-#endif /* !__ASSEMBLY__ */
+#include <asm/cmpxchg.h>
 
 #ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
@@ -269,6 +164,8 @@ static inline void atomic_dec(atomic_t *v)
         c;                                      \
 })
 
+#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))
 
 /**
  * atomic_clear_mask - Atomically clear bits in memory
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
new file mode 100644
index 000000000000..2bd97a5c8af7
--- /dev/null
+++ b/arch/mn10300/include/asm/barrier.h
@@ -0,0 +1,37 @@
+/* MN10300 memory barrier definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_BARRIER_H
+#define _ASM_BARRIER_H
+
+#define nop()   asm volatile ("nop")
+
+#define mb()    asm volatile ("": : :"memory")
+#define rmb()   mb()
+#define wmb()   asm volatile ("": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define read_barrier_depends()          do {} while (0)
+#define smp_read_barrier_depends()      do {} while (0)
+
+#endif /* _ASM_BARRIER_H */
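
Note (not part of the patch): the barrier definitions move over unchanged. As a usage sketch only, with hypothetical variables data_buf and data_ready, the helpers from the new header pair up like this:

    #include <asm/barrier.h>

    /* Illustrative only: the names below are hypothetical, not from this patch. */
    static unsigned long data_buf;
    static unsigned long data_ready;

    /* Publisher: make data_buf visible before the flag that announces it. */
    static void publish(unsigned long val)
    {
            data_buf = val;
            smp_wmb();              /* order the data store before the flag store */
            set_mb(data_ready, 1);  /* store the flag, followed by a full barrier */
    }

    /* Consumer: check the flag, then read the data behind a read barrier. */
    static int try_consume(unsigned long *out)
    {
            if (!data_ready)
                    return 0;
            smp_rmb();              /* order the flag read before the data read */
            *out = data_buf;
            return 1;
    }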
diff --git a/arch/mn10300/include/asm/cmpxchg.h b/arch/mn10300/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..97a4aaf387a6
--- /dev/null
+++ b/arch/mn10300/include/asm/cmpxchg.h
@@ -0,0 +1,115 @@
+/* MN10300 Atomic xchg/cmpxchg operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_CMPXCHG_H
+#define _ASM_CMPXCHG_H
+
+#include <asm/irqflags.h>
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1: mov %4,(_AAR,%3)    \n"
+                "   mov (_ADR,%3),%1    \n"
+                "   mov %5,(_ADR,%3)    \n"
+                "   mov (_ADR,%3),%0    \n"     /* flush */
+                "   mov (_ASR,%3),%0    \n"
+                "   or  %0,%0           \n"
+                "   bne 1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+                : "memory", "cc");
+
+        return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1: mov %4,(_AAR,%3)    \n"
+                "   mov (_ADR,%3),%1    \n"
+                "   cmp %5,%1           \n"
+                "   bne 2f              \n"
+                "   mov %6,(_ADR,%3)    \n"
+                "2: mov (_ADR,%3),%0    \n"     /* flush */
+                "   mov (_ASR,%3),%0    \n"
+                "   or  %0,%0           \n"
+                "   bne 1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                  "r"(old), "r"(new)
+                : "memory", "cc");
+
+        return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else  /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        *m = val;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        if (oldval == old)
+                *m = new;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                            \
+        ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                     (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                      \
+        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                        (unsigned long)(o),     \
+                                        (unsigned long)(n)))
+
+#endif /* _ASM_CMPXCHG_H */
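
Note (not part of the patch): xchg() and cmpxchg() keep their existing semantics; only their home changes. A minimal sketch of the usual compare-and-swap retry loop built on the cmpxchg() macro defined above (the function name and counter are hypothetical):

    #include <asm/cmpxchg.h>

    /* Illustrative only: atomically add 'delta' to '*ctr', retrying if
     * another CPU updated the counter between the read and the cmpxchg(). */
    static inline unsigned long counter_add(unsigned long *ctr, unsigned long delta)
    {
            unsigned long old, new;

            do {
                    old = *ctr;
                    new = old + delta;
            } while (cmpxchg(ctr, old, new) != old);

            return new;
    }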
diff --git a/arch/mn10300/include/asm/dma.h b/arch/mn10300/include/asm/dma.h
index 098df2e617ab..10b77d4628c2 100644
--- a/arch/mn10300/include/asm/dma.h
+++ b/arch/mn10300/include/asm/dma.h
@@ -11,7 +11,6 @@
 #ifndef _ASM_DMA_H
 #define _ASM_DMA_H
 
-#include <asm/system.h>
 #include <linux/spinlock.h>
 #include <asm/io.h>
 #include <linux/delay.h>
diff --git a/arch/mn10300/include/asm/exec.h b/arch/mn10300/include/asm/exec.h
new file mode 100644
index 000000000000..c74e367f4b9d
--- /dev/null
+++ b/arch/mn10300/include/asm/exec.h
@@ -0,0 +1,16 @@
+/* MN10300 process execution definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
new file mode 100644
index 000000000000..393d311735c8
--- /dev/null
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -0,0 +1,49 @@
+/* MN10300 task switching definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/barrier.h>
+
+struct task_struct;
+struct thread_struct;
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)                                          \
+        do {                                                            \
+                if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
+                        (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
+                        (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
+                        fpu_save(&(prev)->thread.fpu_state);            \
+                }                                                       \
+        } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
+
+/* context switching is now performed out-of-line in switch_to.S */
+extern asmlinkage
+struct task_struct *__switch_to(struct thread_struct *prev,
+                                struct thread_struct *next,
+                                struct task_struct *prev_task);
+
+#define switch_to(prev, next, last)                                     \
+do {                                                                    \
+        switch_fpu(prev, next);                                         \
+        current->thread.wchan = (u_long) __builtin_return_address(0);  \
+        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
+        mb();                                                           \
+        current->thread.wchan = 0;                                      \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 94b4c5e1491b..a7f40578587c 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -1,102 +1,5 @@
-/* MN10300 System definitions
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <asm/cpu-regs.h>
-#include <asm/intctl-regs.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <linux/atomic.h>
-
-#if !defined(CONFIG_LAZY_SAVE_FPU)
-struct fpu_state_struct;
-extern asmlinkage void fpu_save(struct fpu_state_struct *);
-#define switch_fpu(prev, next)                                          \
-        do {                                                            \
-                if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
-                        (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
-                        (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
-                        fpu_save(&(prev)->thread.fpu_state);            \
-                }                                                       \
-        } while (0)
-#else
-#define switch_fpu(prev, next) do {} while (0)
-#endif
-
-struct task_struct;
-struct thread_struct;
-
-extern asmlinkage
-struct task_struct *__switch_to(struct thread_struct *prev,
-                                struct thread_struct *next,
-                                struct task_struct *prev_task);
-
-/* context switching is now performed out-of-line in switch_to.S */
-#define switch_to(prev, next, last)                                     \
-do {                                                                    \
-        switch_fpu(prev, next);                                         \
-        current->thread.wchan = (u_long) __builtin_return_address(0);  \
-        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
-        mb();                                                           \
-        current->thread.wchan = 0;                                      \
-} while (0)
-
-#define arch_align_stack(x) (x)
-
-#define nop() asm volatile ("nop")
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-#define mb()    asm volatile ("": : :"memory")
-#define rmb()   mb()
-#define wmb()   asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()        mb()
-#define smp_rmb()       rmb()
-#define smp_wmb()       wmb()
-#define set_mb(var, value)  do { xchg(&var, value); } while (0)
-#else  /* CONFIG_SMP */
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define set_mb(var, value)  do { var = value; mb(); } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends()          do {} while (0)
-#define smp_read_barrier_depends()      do {} while (0)
-
-#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>