path: root/include/asm-i386
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/Kbuild              1
-rw-r--r--  include/asm-i386/dma-mapping.h       9
-rw-r--r--  include/asm-i386/fixmap.h            7
-rw-r--r--  include/asm-i386/mmzone.h            6
-rw-r--r--  include/asm-i386/pgtable-2level.h    3
-rw-r--r--  include/asm-i386/pgtable-3level.h    4
-rw-r--r--  include/asm-i386/pgtable.h          45
-rw-r--r--  include/asm-i386/processor.h        60
-rw-r--r--  include/asm-i386/ptrace-abi.h       39
-rw-r--r--  include/asm-i386/ptrace.h           35
-rw-r--r--  include/asm-i386/sync_bitops.h     156
-rw-r--r--  include/asm-i386/system.h           36
12 files changed, 302 insertions(+), 99 deletions(-)
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index b75a348d0c1c..147e4ac1ebf0 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -3,6 +3,7 @@ include include/asm-generic/Kbuild.asm
 header-y += boot.h
 header-y += debugreg.h
 header-y += ldt.h
+header-y += ptrace-abi.h
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 9cf20cacf76e..576ae01d71c8 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -21,8 +21,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	WARN_ON(size == 0);
 	flush_write_buffers();
 	return virt_to_phys(ptr);
@@ -32,8 +31,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline int
@@ -42,8 +40,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 	WARN_ON(nents == 0 || sg[0].length == 0);
 
 	for (i = 0; i < nents; i++ ) {
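Note: the three dma-mapping.h hunks above are a mechanical conversion from the two-line "if (...) BUG();" idiom to BUG_ON(). A minimal userspace sketch of the equivalence follows; the BUG()/BUG_ON() stand-ins and the enum values are illustrative, only the idiom itself comes from the patch.

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-ins; the kernel's real BUG()/BUG_ON() behave the same way. */
    #define BUG()        do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
    #define BUG_ON(cond) do { if (cond) BUG(); } while (0)

    enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

    int main(void)
    {
            enum dma_data_direction dir = DMA_TO_DEVICE;

            /* Old style, as removed by the hunks above: */
            if (dir == DMA_NONE)
                    BUG();

            /* New style, identical behaviour in one line: */
            BUG_ON(dir == DMA_NONE);

            puts("direction ok");
            return 0;
    }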
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index a48cc3f7ccc6..02428cb36621 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -19,7 +19,11 @@
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
-#define __FIXADDR_TOP	0xfffff000
+#ifndef CONFIG_COMPAT_VDSO
+extern unsigned long __FIXADDR_TOP;
+#else
+#define __FIXADDR_TOP	0xfffff000
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
@@ -93,6 +97,7 @@ enum fixed_addresses {
 
 extern void __set_fixmap (enum fixed_addresses idx,
 					unsigned long phys, pgprot_t flags);
+extern void reserve_top_address(unsigned long reserve);
 
 #define set_fixmap(idx, phys) \
 		__set_fixmap(idx, phys, PAGE_KERNEL)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 22cb07cc8f32..61b073322006 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -38,10 +38,16 @@ static inline void get_memcfg_numa(void)
 }
 
 extern int early_pfn_to_nid(unsigned long pfn);
+extern void numa_kva_reserve(void);
 
 #else /* !CONFIG_NUMA */
+
 #define get_memcfg_numa get_memcfg_numa_flat
 #define get_zholes_size(n) (0)
+
+static inline void numa_kva_reserve(void)
+{
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 2756d4b04c27..201c86a6711e 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -21,8 +21,9 @@
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
-#define pte_same(a, b)		((a).pte_low == (b).pte_low)
+
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index dccb1b3337ad..0d899173232e 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -77,7 +77,7 @@ static inline void pud_clear (pud_t * pud) { }
 #define pud_page(pud) \
 ((struct page *) __va(pud_val(pud) & PAGE_MASK))
 
-#define pud_page_kernel(pud) \
+#define pud_page_vaddr(pud) \
 ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 
@@ -105,6 +105,7 @@ static inline void pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t res;
@@ -117,6 +118,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return res;
 }
 
+#define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t a, pte_t b)
 {
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 64140f2f1b95..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
+do {									\
+	if (dirty) {							\
+		(ptep)->pte_low = (entry).pte_low;			\
+		flush_tlb_page(vma, address);				\
+	}								\
+} while (0)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_dirty(*ptep))
@@ -253,6 +270,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
@@ -260,6 +278,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
@@ -272,6 +291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 	return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -364,11 +384,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_index(address) \
 		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
 
 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
-#define pmd_page_kernel(pmd) \
+#define pmd_page_vaddr(pmd) \
 		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /*
@@ -409,23 +429,8 @@ extern pte_t *lookup_address(unsigned long address);
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
  */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								  \
-		if (__dirty) {						  \
-			(__ptep)->pte_low = (__entry).pte_low;		  \
-			flush_tlb_page(__vma, __address);		  \
-		}							  \
-	} while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
@@ -439,12 +444,6 @@ extern pte_t *lookup_address(unsigned long address);
 #define GET_IOSPACE(pfn)		0
 #define GET_PFN(pfn)			(pfn)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
 
 #endif /* _I386_PGTABLE_H */
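A note on the pgtable.h reorganisation: each __HAVE_ARCH_* marker now sits directly above the i386 implementation it guards instead of being collected at the bottom of the file, and asm-generic/pgtable.h only supplies a generic version when the marker is absent. The toy program below (not kernel code; the simplified pte type and bit value are made up) shows the same ifndef override pattern in miniature.

    #include <stdio.h>

    /* "arch" side: provides its own helper and announces the fact. */
    #define __HAVE_ARCH_PTEP_SET_WRPROTECT
    static inline void ptep_set_wrprotect(unsigned long *pte)
    {
            *pte &= ~0x2UL;                 /* pretend bit 1 is the RW bit */
    }

    /* "generic" side: compiled in only when the arch did not claim it. */
    #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
    static inline void ptep_set_wrprotect(unsigned long *pte)
    {
            /* a slower generic fallback would live here */
    }
    #endif

    int main(void)
    {
            unsigned long pte = 0x7;        /* say: present | rw | accessed */
            ptep_set_wrprotect(&pte);
            printf("pte after wrprotect: %#lx\n", pte);
            return 0;
    }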
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index b32346d62e10..2277127696d2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -143,6 +143,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+			   unsigned int *ecx, unsigned int *edx)
+{
+	/* ecx is often an input as well as an output. */
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (*eax), "2" (*ecx));
+}
+
 /*
  * Generic CPUID function
  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
@@ -150,24 +162,18 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
  */
 static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c"(0));
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /* Some CPUID calls want 'count' to be placed in ecx */
 static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
 		int *edx)
 {
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c" (count));
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
 }
 
 /*
@@ -175,42 +181,30 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
  */
 static inline unsigned int cpuid_eax(unsigned int op)
 {
-	unsigned int eax;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (op)
-		: "bx", "cx", "dx");
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return eax;
 }
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
-	unsigned int eax, ebx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=b" (ebx)
-		: "0" (op)
-		: "cx", "dx" );
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ebx;
 }
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
-	unsigned int eax, ecx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=c" (ecx)
-		: "0" (op)
-		: "bx", "dx" );
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ecx;
 }
 static inline unsigned int cpuid_edx(unsigned int op)
 {
-	unsigned int eax, edx;
+	unsigned int eax, ebx, ecx, edx;
 
-	__asm__("cpuid"
-		: "=a" (eax), "=d" (edx)
-		: "0" (op)
-		: "bx", "cx");
+	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return edx;
 }
 
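For reference, the reworked helpers all funnel through __cpuid(): cpuid() loads the leaf into eax and zeroes ecx (some CPUs, e.g. the Cyrix MII, otherwise leave it stale), and the cpuid_eax()/_ebx()/_ecx()/_edx() wrappers just pick one output register. A rough standalone sketch of the same flow is below; my_cpuid() mirrors the patched __cpuid() but nothing here is the kernel's code, and it assumes GCC-style inline asm on an x86 machine.

    #include <stdio.h>
    #include <string.h>

    /* Same shape as the patched __cpuid(): eax/ecx act as inputs and outputs. */
    static inline void my_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
    {
            __asm__("cpuid"
                    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                    : "0" (*eax), "2" (*ecx));
    }

    int main(void)
    {
            unsigned int eax = 0, ebx, ecx = 0, edx;    /* leaf 0, subleaf 0 */
            char vendor[13];

            my_cpuid(&eax, &ebx, &ecx, &edx);

            /* Leaf 0 returns the 12-byte vendor string in ebx/edx/ecx. */
            memcpy(vendor + 0, &ebx, 4);
            memcpy(vendor + 4, &edx, 4);
            memcpy(vendor + 8, &ecx, 4);
            vendor[12] = '\0';

            printf("max basic leaf %u, vendor \"%s\"\n", eax, vendor);
            return 0;
    }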
diff --git a/include/asm-i386/ptrace-abi.h b/include/asm-i386/ptrace-abi.h
new file mode 100644
index 000000000000..a44901817a26
--- /dev/null
+++ b/include/asm-i386/ptrace-abi.h
@@ -0,0 +1,39 @@
+#ifndef I386_PTRACE_ABI_H
+#define I386_PTRACE_ABI_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+#define FRAME_SIZE 17
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#define PTRACE_SYSEMU 31
+#define PTRACE_SYSEMU_SINGLESTEP 32
+
+#endif
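The new ptrace-abi.h only carries the userspace-visible register indexes and ptrace request numbers that used to live in ptrace.h (the next diff removes them from there). A hedged 32-bit userspace sketch of how those indexes get used with PTRACE_PEEKUSER follows; it relies on the matching EIP constant that glibc exports via sys/reg.h, and everything apart from the ptrace requests and the index * sizeof(long) offset convention is illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <sys/reg.h>     /* EIP, EAX, ... - same numbering as ptrace-abi.h */
    #include <unistd.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {                          /* child: ask to be traced */
                    ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                    execl("/bin/true", "true", (char *)NULL);
                    _exit(127);
            }

            waitpid(pid, NULL, 0);                   /* child stops at execve */

            /* PTRACE_PEEKUSER offsets are register-index * sizeof(long) on i386. */
            long eip = ptrace(PTRACE_PEEKUSER, pid, (void *)(EIP * sizeof(long)), NULL);
            printf("child stopped with eip=%#lx\n", eip);

            ptrace(PTRACE_CONT, pid, NULL, NULL);
            waitpid(pid, NULL, 0);
            return 0;
    }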
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 21bb91679c82..a4a0e5207db5 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -1,24 +1,7 @@
 #ifndef _I386_PTRACE_H
 #define _I386_PTRACE_H
 
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS 13
-#define EFL 14
-#define UESP 15
-#define SS 16
-#define FRAME_SIZE 17
+#include <asm/ptrace-abi.h>
 
 /* this struct defines the way the registers are stored on the
    stack during a system call. */
@@ -41,22 +24,6 @@ struct pt_regs {
 	int  xss;
 };
 
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#define PTRACE_GET_THREAD_AREA 25
-#define PTRACE_SET_THREAD_AREA 26
-
-#define PTRACE_SYSEMU 31
-#define PTRACE_SYSEMU_SINGLESTEP 32
-
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h
new file mode 100644
index 000000000000..c94d51c993ee
--- /dev/null
+++ b/include/asm-i386/sync_bitops.h
@@ -0,0 +1,156 @@
+#ifndef _I386_SYNC_BITOPS_H
+#define _I386_SYNC_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * sync_set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btsl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * sync_clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btrl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on other architectures than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btcl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on other architectures than x86.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static __always_inline int sync_const_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit)
+			     :"m" (ADDR),"Ir" (nr));
+	return oldbit;
+}
+
+#define sync_test_bit(nr,addr)			\
+	(__builtin_constant_p(nr) ?		\
+	 sync_const_test_bit((nr),(addr)) :	\
+	 sync_var_test_bit((nr),(addr)))
+
+#undef ADDR
+
+#endif /* _I386_SYNC_BITOPS_H */
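The point of the sync_ variants is that they always emit the lock prefix, independent of CONFIG_SMP, because the peer on the other side of the shared memory (an SMP hypervisor) may be running concurrently. A kernel-side usage sketch follows; the shared_info_stub structure and the event-channel naming are made up for illustration (loosely Xen-flavoured), only the sync_* calls come from the header above.

    #include <asm/sync_bitops.h>

    struct shared_info_stub {
            unsigned long evtchn_pending[2];   /* bitmap also written by the hypervisor */
            unsigned long evtchn_mask[2];
    };

    static void ack_pending_events(struct shared_info_stub *shared)
    {
            int port;

            for (port = 0; port < 64; port++) {
                    /* The test-and-clear must be atomic w.r.t. the (SMP) hypervisor,
                     * even if this guest kernel was built without CONFIG_SMP. */
                    if (sync_test_and_clear_bit(port, shared->evtchn_pending))
                            sync_set_bit(port, shared->evtchn_mask);   /* e.g. mask it */
            }
    }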
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 098bcee94e38..a6dabbcd6e6a 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,6 +267,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -296,6 +299,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to