Diffstat (limited to 'include/asm-i386')

 -rw-r--r--  include/asm-i386/atomic.h       | 30
 -rw-r--r--  include/asm-i386/futex.h        | 10
 -rw-r--r--  include/asm-i386/local.h        | 14
 -rw-r--r--  include/asm-i386/posix_types.h  |  4
 -rw-r--r--  include/asm-i386/rwlock.h       |  4
 -rw-r--r--  include/asm-i386/rwsem.h        | 35
 -rw-r--r--  include/asm-i386/semaphore.h    |  8
 -rw-r--r--  include/asm-i386/spinlock.h     | 14
 -rw-r--r--  include/asm-i386/system.h       | 23
 -rw-r--r--  include/asm-i386/thread_info.h  |  7
 10 files changed, 74 insertions(+), 75 deletions(-)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4f061fa73794..51a166242522 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -46,8 +46,8 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
 		LOCK_PREFIX "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 /**
@@ -61,8 +61,8 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
 		LOCK_PREFIX "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 /**
@@ -80,8 +80,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 
 	__asm__ __volatile__(
 		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+		:"+m" (v->counter), "=qm" (c)
+		:"ir" (i) : "memory");
 	return c;
 }
 
@@ -95,8 +95,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
 		LOCK_PREFIX "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		:"+m" (v->counter));
 }
 
 /**
@@ -109,8 +108,7 @@ static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
 		LOCK_PREFIX "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		:"+m" (v->counter));
 }
 
 /**
@@ -127,8 +125,8 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 
 	__asm__ __volatile__(
 		LOCK_PREFIX "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+		:"+m" (v->counter), "=qm" (c)
+		: : "memory");
 	return c != 0;
 }
 
@@ -146,8 +144,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 
 	__asm__ __volatile__(
 		LOCK_PREFIX "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+		:"+m" (v->counter), "=qm" (c)
+		: : "memory");
 	return c != 0;
 }
 
@@ -166,8 +164,8 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 
 	__asm__ __volatile__(
 		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+		:"+m" (v->counter), "=qm" (c)
+		:"ir" (i) : "memory");
 	return c;
 }
 
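The pattern repeated throughout atomic.h is the point of the whole series: the old code named v->counter twice, once as a write-only "=m" output and again as a plain "m" input, to tell GCC that the memory word is both read and written. The "+m" constraint states that read-modify-write relationship directly in a single operand. A minimal user-space sketch of the two forms (a plain int stands in for atomic_t, and the lock prefix is spelled out because LOCK_PREFIX is a kernel macro):

/* old style: write-only "=m" output plus a duplicate "m" input */
static inline void add_old(int i, int *counter)
{
	__asm__ __volatile__(
		"lock; addl %1,%0"
		: "=m" (*counter)
		: "ir" (i), "m" (*counter));
}

/* new style: one "+m" operand, read and written in place */
static inline void add_new(int i, int *counter)
{
	__asm__ __volatile__(
		"lock; addl %1,%0"
		: "+m" (*counter)
		: "ir" (i));
}

Both compile to the same lock addl; "+m" is simply the honest, one-operand description of what the instruction does.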
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 7b8ceefd010f..946d97cfea23 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -20,8 +20,8 @@
 		       .align 8\n\
 		       .long 1b,3b\n\
 		       .previous"					\
-	: "=r" (oldval), "=r" (ret), "=m" (*uaddr)			\
-	: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)			\
+	: "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
 	__asm__ __volatile ( \
@@ -38,9 +38,9 @@
 		       .align 8\n\
 		       .long 1b,4b,2b,4b\n\
 		       .previous" \
-	: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
+	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
 	  "=&r" (tem) \
-	: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+	: "r" (oparg), "i" (-EFAULT), "1" (0))
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -123,7 +123,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.long	1b,3b				\n"
 	"	.previous				\n"
 
-	: "=a" (oldval), "=m" (*uaddr)
+	: "=a" (oldval), "+m" (*uaddr)
 	: "i" (-EFAULT), "r" (newval), "0" (oldval)
 	: "memory"
 	);
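The futex code also leans on digit matching constraints: "0" (oparg) and "1" (0) force those inputs into the same registers as outputs %0 and %1, so oldval enters the asm already holding oparg. A self-contained sketch of the same convention (xadd_int is an illustrative name, not a kernel helper):

/* Per the "0" constraint, %0 ("old") must start out holding val;
 * lock xadd then adds it to *p and leaves the previous *p in %0. */
static inline int xadd_int(int *p, int val)
{
	int old;
	__asm__ __volatile__(
		"lock; xaddl %0,%1"
		: "=r" (old), "+m" (*p)
		: "0" (val)
		: "memory");
	return old;
}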
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 3b4998c51d08..12060e22f7e2 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -17,32 +17,30 @@ static __inline__ void local_inc(local_t *v)
 {
 	__asm__ __volatile__(
 		"incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		:"+m" (v->counter));
 }
 
 static __inline__ void local_dec(local_t *v)
 {
 	__asm__ __volatile__(
 		"decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+		:"+m" (v->counter));
 }
 
 static __inline__ void local_add(long i, local_t *v)
 {
 	__asm__ __volatile__(
 		"addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 static __inline__ void local_sub(long i, local_t *v)
 {
 	__asm__ __volatile__(
 		"subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+		:"+m" (v->counter)
+		:"ir" (i));
 }
 
 /* On x86, these are no better than the atomic variants. */
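local.h gets the same "+m" treatment but deliberately omits LOCK_PREFIX: a single incl is indivisible with respect to interrupts on the executing CPU, which is all local_t promises, while only the lock prefix makes the read-modify-write atomic across CPUs. A user-space sketch of the distinction (illustrative names; int keeps incl correct regardless of host word size):

/* interrupt-safe on one CPU: the whole RMW is one instruction */
static inline void local_style_inc(int *counter)
{
	__asm__ __volatile__("incl %0" : "+m" (*counter));
}

/* SMP-safe: the lock prefix serializes the RMW against other CPUs */
static inline void atomic_style_inc(int *counter)
{
	__asm__ __volatile__("lock; incl %0" : "+m" (*counter));
}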
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
index 4e47ed059ad6..133e31e7dfde 100644
--- a/include/asm-i386/posix_types.h
+++ b/include/asm-i386/posix_types.h
@@ -51,12 +51,12 @@ typedef struct {
 #undef	__FD_SET
 #define __FD_SET(fd,fdsetp) \
 		__asm__ __volatile__("btsl %1,%0": \
-			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
 
 #undef	__FD_CLR
 #define __FD_CLR(fd,fdsetp) \
 		__asm__ __volatile__("btrl %1,%0": \
-			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+			"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
 
 #undef	__FD_ISSET
 #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 94f00195d543..96b0bef2ea56 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -37,7 +37,7 @@
37 "popl %%eax\n\t" \ 37 "popl %%eax\n\t" \
38 "1:\n", \ 38 "1:\n", \
39 "subl $1,%0\n\t", \ 39 "subl $1,%0\n\t", \
40 "=m" (*(volatile int *)rw) : : "memory") 40 "+m" (*(volatile int *)rw) : : "memory")
41 41
42#define __build_read_lock(rw, helper) do { \ 42#define __build_read_lock(rw, helper) do { \
43 if (__builtin_constant_p(rw)) \ 43 if (__builtin_constant_p(rw)) \
@@ -63,7 +63,7 @@
63 "popl %%eax\n\t" \ 63 "popl %%eax\n\t" \
64 "1:\n", \ 64 "1:\n", \
65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \ 65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
66 "=m" (*(volatile int *)rw) : : "memory") 66 "+m" (*(volatile int *)rw) : : "memory")
67 67
68#define __build_write_lock(rw, helper) do { \ 68#define __build_write_lock(rw, helper) do { \
69 if (__builtin_constant_p(rw)) \ 69 if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 2f07601562e7..43113f5608eb 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -111,8 +111,8 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
 	"  jmp 1b\n"
 	LOCK_SECTION_END
 	"# ending down_read\n\t"
-	: "=m"(sem->count)
-	: "a"(sem), "m"(sem->count)
+	: "+m" (sem->count)
+	: "a" (sem)
 	: "memory", "cc");
 }
 
@@ -133,8 +133,8 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
 	"  jnz	  1b\n\t"
 	"2:\n\t"
 	"# ending __down_read_trylock\n\t"
-	: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
-	: "i"(RWSEM_ACTIVE_READ_BIAS)
+	: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+	: "i" (RWSEM_ACTIVE_READ_BIAS)
 	: "memory", "cc");
 	return result>=0 ? 1 : 0;
 }
@@ -161,8 +161,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
 	"  jmp 1b\n"
 	LOCK_SECTION_END
 	"# ending down_write"
-	: "=m"(sem->count), "=d"(tmp)
-	: "a"(sem), "1"(tmp), "m"(sem->count)
+	: "+m" (sem->count), "=d" (tmp)
+	: "a" (sem), "1" (tmp)
 	: "memory", "cc");
 }
 
@@ -205,8 +205,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
 	"  jmp 1b\n"
 	LOCK_SECTION_END
 	"# ending __up_read\n"
-	: "=m"(sem->count), "=d"(tmp)
-	: "a"(sem), "1"(tmp), "m"(sem->count)
+	: "+m" (sem->count), "=d" (tmp)
+	: "a" (sem), "1" (tmp)
 	: "memory", "cc");
 }
 
@@ -231,8 +231,8 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
 	"  jmp 1b\n"
 	LOCK_SECTION_END
 	"# ending __up_write\n"
-	: "=m"(sem->count)
-	: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
+	: "+m" (sem->count)
+	: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
 	: "memory", "cc", "edx");
 }
 
@@ -256,8 +256,8 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
 	"  jmp 1b\n"
 	LOCK_SECTION_END
 	"# ending __downgrade_write\n"
-	: "=m"(sem->count)
-	: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+	: "+m" (sem->count)
+	: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
 	: "memory", "cc");
 }
 
@@ -268,8 +268,8 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
 	__asm__ __volatile__(
 LOCK_PREFIX	"addl %1,%0"
-	: "=m"(sem->count)
-	: "ir"(delta), "m"(sem->count));
+	: "+m" (sem->count)
+	: "ir" (delta));
 }
 
 /*
@@ -280,10 +280,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	int tmp = delta;
 
 	__asm__ __volatile__(
-LOCK_PREFIX	"xadd %0,(%2)"
-	: "+r"(tmp), "=m"(sem->count)
-	: "r"(sem), "m"(sem->count)
-	: "memory");
+LOCK_PREFIX	"xadd %0,%1"
+	: "+r" (tmp), "+m" (sem->count)
+	: : "memory");
 
 	return tmp+delta;
 }
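The rwsem_atomic_update() hunk goes a step beyond the constraint swap: instead of passing sem in a register and hand-writing the (%2) addressing mode, it hands the compiler a "+m" operand and lets it pick whatever address form it likes, dropping an input operand in the bargain. The same transformation on a bare counter (fetch_add_new is an illustrative name):

/* sketch: compiler-chosen addressing for the xadd memory operand */
static inline int fetch_add_new(int *count, int delta)
{
	int tmp = delta;
	__asm__ __volatile__(
		"lock; xadd %0,%1"
		: "+r" (tmp), "+m" (*count)
		: : "memory");
	return tmp + delta;	/* tmp holds the old value, so this is the new one */
}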
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index f7a0f310c524..d51e800acf29 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -107,7 +107,7 @@ static inline void down(struct semaphore * sem)
 		"call __down_failed\n\t"
 		"jmp 1b\n"
 		LOCK_SECTION_END
-		:"=m" (sem->count)
+		:"+m" (sem->count)
 		:
 		:"memory","ax");
 }
@@ -132,7 +132,7 @@ static inline int down_interruptible(struct semaphore * sem)
 		"call __down_failed_interruptible\n\t"
 		"jmp 1b\n"
 		LOCK_SECTION_END
-		:"=a" (result), "=m" (sem->count)
+		:"=a" (result), "+m" (sem->count)
 		:
 		:"memory");
 	return result;
@@ -157,7 +157,7 @@ static inline int down_trylock(struct semaphore * sem)
 		"call __down_failed_trylock\n\t"
 		"jmp 1b\n"
 		LOCK_SECTION_END
-		:"=a" (result), "=m" (sem->count)
+		:"=a" (result), "+m" (sem->count)
 		:
 		:"memory");
 	return result;
@@ -182,7 +182,7 @@ static inline void up(struct semaphore * sem)
 		"jmp 1b\n"
 		LOCK_SECTION_END
 		".subsection 0\n"
-		:"=m" (sem->count)
+		:"+m" (sem->count)
 		:
 		:"memory","ax");
 }
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 87c40f830653..d816c62a7a1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -65,7 +65,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	alternative_smp(
 		__raw_spin_lock_string,
 		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : : "memory");
+		"+m" (lock->slock) : : "memory");
 }
 
 /*
@@ -79,7 +79,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 	alternative_smp(
 		__raw_spin_lock_string_flags,
 		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : "r" (flags) : "memory");
+		"+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
@@ -88,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	char oldval;
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->slock)
+		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
@@ -104,7 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #define __raw_spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->slock) : : "memory"
+		:"+m" (lock->slock) : : "memory"
 
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 #define __raw_spin_unlock_string \
 	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->slock) \
+		:"=q" (oldval), "+m" (lock->slock) \
 		:"0" (oldval) : "memory"
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -199,13 +199,13 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-				 : "=m" (rw->lock) : : "memory");
+				 : "+m" (rw->lock) : : "memory");
 }
 
 #endif /* __ASM_SPINLOCK_H */
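A note on the trylock above: xchg with a memory operand is implicitly locked on x86, so no LOCK_PREFIX is needed; swapping in 0 both claims the lock and, via the old byte, reports whether it was free. A stand-alone sketch of the same idiom (trylock_byte is an illustrative name; 1 means unlocked, as in raw_spinlock_t):

static inline int trylock_byte(char *slock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		: "=q" (oldval), "+m" (*slock)
		: "0" (0) : "memory");
	return oldval > 0;	/* nonzero old value: the lock was free */
}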
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index db398d88b1d9..49928eb33f8b 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -82,10 +82,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -94,7 +90,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 
 #define read_cr2() ({ \
 	unsigned int __dummy; \
@@ -104,7 +100,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 
 #define read_cr3() ({ \
 	unsigned int __dummy; \
@@ -114,7 +110,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))
 
 #define read_cr4() ({ \
 	unsigned int __dummy; \
@@ -123,7 +119,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
-
 #define read_cr4_safe() ({ \
 	unsigned int __dummy; \
 	/* This could fault if %cr4 does not exist */ \
@@ -135,15 +130,19 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	: "=r" (__dummy): "0" (0)); \
 	__dummy; \
 })
-
 #define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
 
 #define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
+	__asm__ __volatile__ ("wbinvd": : :"memory")
 
 static inline unsigned long get_limit(unsigned long segment)
 {
@@ -454,8 +453,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
 #include <linux/irqflags.h>
 
 /*
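The system.h hunks mix three things: moving clts() down next to stts(), deleting the unused set_wmb(), and stripping the trailing semicolons from the write_crN() and wbinvd() macros. The semicolons are not cosmetic: a macro whose expansion already ends in ";" produces an extra empty statement under if/else and orphans the else. The hazard in miniature, with a harmless nop standing in for the privileged instructions:

#define NOP_BAD()  __asm__ __volatile__("nop");	/* note the trailing ; */
#define NOP_GOOD() __asm__ __volatile__("nop")

void demo(int cond)
{
	if (cond)
		NOP_GOOD();		/* one statement: parses fine */
	else
		NOP_GOOD();
	/* With NOP_BAD() the if-branch expands to "asm(...);;" and the
	 * stray empty statement leaves the else with no matching if. */
}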
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 2833fa2c0dd0..54d6d7aea938 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,6 +140,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_MEMDIE		16
+#define TIF_DEBUG		17	/* uses debug registers */
+#define TIF_IO_BITMAP		18	/* uses I/O bitmap */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -151,6 +153,8 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_DEBUG		(1<<TIF_DEBUG)
+#define _TIF_IO_BITMAP		(1<<TIF_IO_BITMAP)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -159,6 +163,9 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
 
+/* flags to check in __switch_to() */
+#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
+
 /*
  * Thread-synchronous status.
  *
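Per its comment, the new _TIF_WORK_CTXSW mask groups the two rare per-task conditions, debug registers and a private I/O bitmap, so the context-switch path can test for both with a single AND. A self-contained sketch of the idea (struct task and switch_extra are stand-ins, not the kernel's __switch_to()):

#define TIF_DEBUG	17
#define TIF_IO_BITMAP	18
#define _TIF_DEBUG	(1 << TIF_DEBUG)
#define _TIF_IO_BITMAP	(1 << TIF_IO_BITMAP)
#define _TIF_WORK_CTXSW	(_TIF_DEBUG | _TIF_IO_BITMAP)

struct task { unsigned long flags; };

static void switch_extra(struct task *prev, struct task *next)
{
	/* one combined test keeps the common case to a single branch */
	if ((prev->flags | next->flags) & _TIF_WORK_CTXSW) {
		/* slow path: swap debug registers / I/O bitmap here */
	}
}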