author		Becky Bruce <bgill@freescale.com>	2005-09-22 15:20:04 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-09-25 08:38:46 -0400
commit		feaf7cf153335fe7100b65ed6f4585c3574fe69a (patch)
tree		c57198f01b5f12ffe8ce90f4e1399505c1f84a02 /include
parent		2bfadee32f1501faa3184d574f6a769f17236c87 (diff)
[PATCH] powerpc: merge atomic.h, memory.h

Merged atomic.h into include/asm-powerpc.  Moved the asm-style HMT_
defines from memory.h into ppc_asm.h, where there were already HMT_
defines; moved the C-style HMT_ defines to processor.h.  Renamed
memory.h to synch.h to better reflect its contents.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/atomic.h (renamed from include/asm-ppc/atomic.h) |  45
-rw-r--r--  include/asm-powerpc/ppc_asm.h                                        |   3
-rw-r--r--  include/asm-powerpc/synch.h                                          |  51
-rw-r--r--  include/asm-ppc/io.h                                                 |  11
-rw-r--r--  include/asm-ppc64/atomic.h                                           | 197
-rw-r--r--  include/asm-ppc64/bitops.h                                           |   2
-rw-r--r--  include/asm-ppc64/futex.h                                            |   2
-rw-r--r--  include/asm-ppc64/io.h                                               |   2
-rw-r--r--  include/asm-ppc64/memory.h                                           |  61
-rw-r--r--  include/asm-ppc64/processor.h                                        |   8
-rw-r--r--  include/asm-ppc64/system.h                                           |   4
11 files changed, 88 insertions(+), 298 deletions(-)
diff --git a/include/asm-ppc/atomic.h b/include/asm-powerpc/atomic.h
index eeafd505836e..ed4b345ed75d 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
 /*
  * PowerPC atomic operations
  */
 
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
-
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <asm/synch.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		(((v)->counter) = (i))
 
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-
-#ifdef CONFIG_SMP
-#define SMP_SYNC	"sync"
-#define SMP_ISYNC	"\n\tisync"
-#else
-#define SMP_SYNC	""
-#define SMP_ISYNC
-#endif
-
 /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
  * The old ATOMIC_SYNC_FIX covered some but not all of this.
  */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	return t;
 }
 
-#define __MB	__asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec()	__MB
-#define smp_mb__after_atomic_dec()	__MB
-#define smp_mb__before_atomic_inc()	__MB
-#define smp_mb__after_atomic_inc()	__MB
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_PPC_ATOMIC_H_ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
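
With EIEIO_ON_SMP ahead of the lwarx and ISYNC_ON_SMP after the stwcx., the value-returning atomics act as full memory barriers on SMP, matching the ppc64 versions they replace. A minimal usage sketch (the function, counter, and limit below are hypothetical, not part of this patch):

	#include <asm/atomic.h>

	#define MAX_PENDING	16			/* hypothetical limit */
	static atomic_t pending = ATOMIC_INIT(0);

	static int submit_request(void)
	{
		/* the implied barriers order this update against the
		 * caller's stores; no explicit smp_mb() is needed */
		if (atomic_add_return(1, &pending) > MAX_PENDING) {
			atomic_dec(&pending);		/* back out */
			return -1;			/* -EBUSY in real code */
		}
		return 0;
	}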
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 553035cda00e..4efa71878fa9 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
 #define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 
 /* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority
 #define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
 #define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
 #define HMT_HIGH	or	3,3,3
 
 /* handle instructions that older assemblers may not know */
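
These are assembler-time macros for .S files, unlike the C-callable HMT_*() macros this patch moves into processor.h. A hypothetical assembly busy-wait might use them as below (label and register choice are illustrative only; on PowerPC gas, `#` starts a comment):

	/* spin until the word at r3 goes nonzero, at reduced SMT priority */
1:	HMT_LOW				# or 1,1,1: yield issue slots
	lwz	r4,0(r3)
	cmpwi	0,r4,0
	beq-	1b
	HMT_MEDIUM			# or 2,2,2: restore priority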
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 000000000000..4660c0394a77
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define LWSYNC	lwsync
+#else
+# define LWSYNC	sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP	"eieio\n"
+#define ISYNC_ON_SMP	"\n\tisync"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp()	eieio()
+#define isync_on_smp()	isync()
+#else
+#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
+#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
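
The upper-case macros are string fragments for pasting into inline asm; eieio() and isync() are the C-callable forms. A sketch of the string form in a simplified unlock path (assuming the CONFIG_SMP expansions above; this is not the kernel's actual spinlock code):

	static inline void example_unlock(volatile unsigned int *lock)
	{
		/* SYNC_ON_SMP is "lwsync\n" on 64-bit SMP, "sync\n" on
		 * 32-bit SMP, and empty on UP builds */
		__asm__ __volatile__(SYNC_ON_SMP : : : "memory");
		*lock = 0;		/* release store after the barrier */
	}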
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 7eb7cf6360bd..39caf067a31b 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -8,6 +8,7 @@
 
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/synch.h>
 #include <asm/mmu.h>
 
 #define SIO_CONFIG_RA	0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
 
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
 /* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
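
eieio() itself is unchanged; it simply moves to <asm/synch.h>, which io.h now includes, so existing callers keep compiling. The usual MMIO pattern it serves, as a sketch (register offsets and names are made up for illustration):

	#include <asm/io.h>		/* now pulls in <asm/synch.h> */

	#define DESC_REG	0	/* hypothetical device registers */
	#define DB_REG		1

	static void ring_doorbell(volatile unsigned *regs, unsigned desc)
	{
		out_be32(&regs[DESC_REG], desc);
		eieio();		/* descriptor write before the kick */
		out_be32(&regs[DB_REG], 1);
	}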
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644
index 0e5f25e83bc0..000000000000
--- a/include/asm-ppc64/atomic.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_PPC64_ATOMIC_H_
-#define _ASM_PPC64_ATOMIC_H_
-
-#include <asm/memory.h>
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v,i)		(((v)->counter) = (i))
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
-	addic.	%0,%0,-1\n\
-	blt-	2f\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	"\n\
-2:"	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
-#endif /* _ASM_PPC64_ATOMIC_H_ */
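
The removed 64-bit file is essentially line-for-line what the merged header now provides, including atomic_dec_if_positive(), which returns the old value minus one. A hedged usage sketch (the helper name is invented):

	/* take one token unless the pool is exhausted; a negative return
	 * from atomic_dec_if_positive() means it was already zero */
	static int try_get_token(atomic_t *pool)
	{
		return atomic_dec_if_positive(pool) >= 0;
	}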
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index a0f831224f96..dbfa42ef4a99 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -42,7 +42,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * clear_bit doesn't imply a memory barrier
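
As the comment says, clear_bit() does not imply a barrier, so unlock-style uses pair it with an explicit one; the bitops header provides smp_mb__before_clear_bit() for exactly this. A sketch (bit number and flag word are hypothetical):

	#define MY_LOCK_BIT	0
	static unsigned long my_flags;

	static void my_unlock(void)
	{
		smp_mb__before_clear_bit();	/* flush critical-section stores */
		clear_bit(MY_LOCK_BIT, &my_flags);
	}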
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
index cb2640b3a408..266b460de44e 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-ppc64/futex.h
@@ -5,7 +5,7 @@
 
 #include <linux/futex.h>
 #include <asm/errno.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/uaccess.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index 59c958aea4db..bd7c9532d77b 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -15,7 +15,7 @@
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/iSeries_io.h>
 #endif
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/delay.h>
 
 #include <asm-generic/iomap.h>
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644
index af53ffb55726..000000000000
--- a/include/asm-ppc64/memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_PPC64_MEMORY_H_
-#define _ASM_PPC64_MEMORY_H_
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
-#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP	"eieio\n"
-#define ISYNC_ON_SMP	"\n\tisync"
-#define SYNC_ON_SMP	"lwsync\n\t"
-#else
-#define EIEIO_ON_SMP
-#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
-#endif
-
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
-#ifdef CONFIG_SMP
-#define eieio_on_smp()	eieio()
-#define isync_on_smp()	isync()
-#else
-#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
-#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
-#endif
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low()   asm volatile("or 31,31,31  # very low priority")
-#define HMT_low()	asm volatile("or 1,1,1		# low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6	# medium low priority")
-#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5	# medium high priority")
-#define HMT_high()	asm volatile("or 3,3,3		# high priority")
-
-#define HMT_VERY_LOW	"\tor	31,31,31	# very low priority\n"
-#define HMT_LOW		"\tor	1,1,1		# low priority\n"
-#define HMT_MEDIUM_LOW	"\tor	6,6,6		# medium low priority\n"
-#define HMT_MEDIUM	"\tor	2,2,2		# medium priority\n"
-#define HMT_MEDIUM_HIGH	"\tor	5,5,5		# medium high priority\n"
-#define HMT_HIGH	"\tor	3,3,3		# high priority\n"
-
-#endif
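
The string-form HMT_* defines deleted here existed to be spliced into multi-line inline asm, roughly like this sketch of the old usage (the function is invented; it polls at low priority):

	static inline void wait_nonzero(volatile unsigned long *addr)
	{
		unsigned long tmp;

		__asm__ __volatile__(
		HMT_LOW			/* "\tor 1,1,1 ...\n" */
	"1:	ldx	%0,0,%1\n\
		cmpdi	0,%0,0\n\
		beq-	1b\n"
		HMT_MEDIUM
		: "=&r" (tmp) : "r" (addr) : "cc", "memory");
	}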
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 4146189006e3..e5fc18531ec1 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -368,6 +368,14 @@ GLUE(.,name):
 #define mfasr()		({unsigned long rval; \
 			asm volatile("mfasr %0" : "=r" (rval)); rval;})
 
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()   asm volatile("or 31,31,31  # very low priority")
+#define HMT_low()	asm volatile("or 1,1,1		# low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6	# medium low priority")
+#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5	# medium high priority")
+#define HMT_high()	asm volatile("or 3,3,3		# high priority")
+
 static inline void set_tb(unsigned int upper, unsigned int lower)
 {
 	mttbl(0);
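
The C-callable forms each wrap a single priority-setting or-nop, so they can bracket a busy-wait directly. A hypothetical poll loop:

	static void spin_until_set(volatile int *done)
	{
		while (!*done)
			HMT_low();	/* or 1,1,1: favour the sibling thread */
		HMT_medium();		/* or 2,2,2: restore dispatch priority */
	}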
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 375015c62f20..1fbdc9f0590c 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * Memory barrier.
@@ -48,7 +48,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("": : :"memory")
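
smp_wmb() keeps its semantics; it is now just spelled with the eieio() helper from synch.h. The store-ordering pairing it exists for, as a sketch (producer/consumer names are illustrative):

	static int data, flag;

	static void producer(void)
	{
		data = 42;
		smp_wmb();	/* eieio: data store ordered before flag store */
		flag = 1;
	}

	static int consumer(void)
	{
		if (!flag)
			return -1;	/* nothing published yet */
		smp_rmb();	/* flag read ordered before data read */
		return data;
	}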