Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/atomic.h           |  5
-rw-r--r--   include/asm-i386/cpufeature.h       |  1
-rw-r--r--   include/asm-i386/i387.h             | 30
-rw-r--r--   include/asm-i386/pgtable-2level.h   |  3
-rw-r--r--   include/asm-i386/pgtable-3level.h   | 20
-rw-r--r--   include/asm-i386/pgtable.h          |  4
-rw-r--r--   include/asm-i386/unistd.h           |  3
7 files changed, 56 insertions, 10 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 22d80ece95cb..4ddce5296a78 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -183,6 +183,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
         int __i;
 #ifdef CONFIG_M386
+        unsigned long flags;
         if(unlikely(boot_cpu_data.x86==3))
                 goto no_xadd;
 #endif
@@ -196,10 +197,10 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
-        local_irq_disable();
+        local_irq_save(flags);
         __i = atomic_read(v);
         atomic_set(v, i + __i);
-        local_irq_enable();
+        local_irq_restore(flags);
         return i + __i;
 #endif
 }
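The 386 fallback path (plain 386 CPUs lack the xadd instruction) now uses local_irq_save()/local_irq_restore() instead of local_irq_disable()/local_irq_enable(), so the caller's interrupt state is preserved. A minimal sketch (not kernel code, names are illustrative) of why the unconditional enable was unsafe:

/* Hedged sketch: why save/restore nests safely on CONFIG_M386. */
void caller_with_irqs_off(atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);          /* caller needs IRQs masked      */
        atomic_add_return(1, v);        /* on a 386 this also masks IRQs */
        /* With the old local_irq_enable() inside atomic_add_return(),
         * interrupts would now be back on behind the caller's back.
         * With local_irq_restore(flags) they stay exactly as the caller
         * left them. */
        local_irq_restore(flags);
}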
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 5c0b5876b931..b44bfc6239cb 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -71,6 +71,7 @@
 #define X86_FEATURE_P4          (3*32+ 7) /* P4 */
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_UP          (3*32+ 9) /* smp kernel running on up */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3        (4*32+ 0) /* Streaming SIMD Extensions-3 */
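X86_FEATURE_FXSAVE_LEAK sits in word 3, the Linux-defined ("other") capability word, so it is set by kernel code rather than read from CPUID. A minimal sketch of how such a synthetic bit is typically set and queried; the detection site shown is an assumption and is not part of this diff:

/* Hedged sketch: setting and testing a word-3 synthetic feature bit. */
static void sketch_detect(struct cpuinfo_x86 *c)
{
        /* assumed condition; the real check lives in the CPU setup code */
        if (c->x86_vendor == X86_VENDOR_AMD)
                set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
}

static void sketch_use(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_FXSAVE_LEAK))
                ;       /* enable the FDP/FIP/FOP scrubbing workaround */
}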
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 152d0baa576a..bc1d6edae1ed 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -13,6 +13,7 @@
 
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/kernel_stat.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -38,17 +39,38 @@ extern void init_fpu(struct task_struct *);
 extern void kernel_fpu_begin(void);
 #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
 
+/* We need a safe address that is cheap to find and that is already
+   in L1 during context switch. The best choices are unfortunately
+   different for UP and SMP */
+#ifdef CONFIG_SMP
+#define safe_address (__per_cpu_offset[0])
+#else
+#define safe_address (kstat_cpu(0).cpustat.user)
+#endif
+
 /*
  * These must be called with preempt disabled
  */
 static inline void __save_init_fpu( struct task_struct *tsk )
 {
+        /* Use more nops than strictly needed in case the compiler
+           varies code */
         alternative_input(
-                "fnsave %1 ; fwait ;" GENERIC_NOP2,
-                "fxsave %1 ; fnclex",
+                "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
+                "fxsave %[fx]\n"
+                "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
                 X86_FEATURE_FXSR,
-                "m" (tsk->thread.i387.fxsave)
-                :"memory");
+                [fx] "m" (tsk->thread.i387.fxsave),
+                [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
+        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+           is pending. Clear the x87 state here by setting it to fixed
+           values. safe_address is a random variable that should be in L1 */
+        alternative_input(
+                GENERIC_NOP8 GENERIC_NOP2,
+                "emms\n\t"              /* clear stack tags */
+                "fildl %[addr]",        /* set F?P to defined value */
+                X86_FEATURE_FXSAVE_LEAK,
+                [addr] "m" (safe_address));
         task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
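The inline asm above is dense, so here is a hedged pseudo-C paraphrase of what __save_init_fpu() now does on FXSR-capable CPUs; fxsave(), fnclex(), emms() and fildl() are illustrative helpers standing in for the instructions, not real kernel functions:

/* Hedged paraphrase of the two alternative_input() blocks above. */
static void save_init_fpu_paraphrase(struct task_struct *tsk)
{
        fxsave(&tsk->thread.i387.fxsave);       /* save FPU/SSE state */

        /* Only execute fnclex when an x87 exception is actually
         * pending: bit 7 (ES) of the saved status word. */
        if (tsk->thread.i387.fxsave.swd & (1 << 7))
                fnclex();

        /* On CPUs with X86_FEATURE_FXSAVE_LEAK, FXSAVE leaves the last
         * task's FDP/FIP/FOP in the FPU unless an exception is pending,
         * so overwrite them with harmless, well-defined values. */
        if (boot_cpu_has(X86_FEATURE_FXSAVE_LEAK)) {
                emms();                 /* clear the x87 stack tags */
                fildl(safe_address);    /* load from a cheap, cached address */
        }

        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}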
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 27bde973abc7..2756d4b04c27 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -18,6 +18,9 @@
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
+#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
+
 #define ptep_get_and_clear(mm,addr,xp)  __pte(xchg(&(xp)->pte_low, 0))
 #define pte_same(a, b)          ((a).pte_low == (b).pte_low)
 #define pte_page(x)             pfn_to_page(pte_pfn(x))
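In the 2-level (non-PAE) layout an entry is a single 32-bit word, so the generic macros that used to live in pgtable.h can simply move here unchanged: clearing is one store. A small illustration of what the macro boils down to, for some hypothetical pte_t *ptep:

/* Illustration only: on 2-level paging pte_clear() reduces to a single
 * naturally-atomic 32-bit store. */
static inline void sketch_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));   /* effectively: ptep->pte_low = 0 */
}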
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 36a5aa63cbbf..dccb1b3337ad 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -85,6 +85,26 @@ static inline void pud_clear (pud_t * pud) { }
 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
                         pmd_index(address))
 
+/*
+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
+ * entry, so clear the bottom half first and enforce ordering with a compiler
+ * barrier.
+ */
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+        ptep->pte_low = 0;
+        smp_wmb();
+        ptep->pte_high = 0;
+}
+
+static inline void pmd_clear(pmd_t *pmd)
+{
+        u32 *tmp = (u32 *)pmd;
+        *tmp = 0;
+        smp_wmb();
+        *(tmp + 1) = 0;
+}
+
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t res;
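With PAE a page-table entry is 64 bits wide and is cleared with two 32-bit stores, so the order matters: the low word carries _PAGE_PRESENT. A hedged sketch (not kernel code) of the hazard the new pte_clear() avoids:

/* Hedged sketch: the ordering bug the PAE pte_clear() above prevents. */
static inline void broken_pte_clear(pte_t *ptep)
{
        ptep->pte_high = 0;     /* WRONG order: the entry still has P=1,     */
                                /* so a concurrent hardware walk on another  */
                                /* CPU can load a "present" entry whose upper */
                                /* pfn bits have already been zeroed.        */
        ptep->pte_low = 0;
}
/* Clearing pte_low first drops the present bit before anything else
 * changes; smp_wmb() keeps the two stores from being reordered. */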
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index ee056c41a9fb..672c3f76b9df 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -204,12 +204,10 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 extern unsigned long pg0[];
 
 #define pte_present(x)  ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
 #define pmd_none(x)     (!(unsigned long)pmd_val(x))
 #define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 
@@ -268,7 +266,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
         pte_t pte;
         if (full) {
                 pte = *ptep;
-                *ptep = __pte(0);
+                pte_clear(mm, addr, ptep);
         } else {
                 pte = ptep_get_and_clear(mm, addr, ptep);
         }
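ptep_get_and_clear_full() now clears the entry through pte_clear() instead of a raw struct assignment, so the level-specific (and on PAE, ordered) clear is used even on the "full mm" teardown fast path. A hedged caller sketch; the real caller is the exit/munmap zap code, which is not part of this diff:

/* Hedged caller sketch; variable names are illustrative. */
pte_t old = ptep_get_and_clear_full(mm, addr, ptep, full_mm_teardown);
if (pte_present(old))
        ;       /* release the page that was mapped at addr */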
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index d81d6cfc1bb4..eb4b152c82fc 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -321,8 +321,9 @@
 #define __NR_splice             313
 #define __NR_sync_file_range    314
 #define __NR_tee                315
+#define __NR_vmsplice           316
 
-#define NR_syscalls 316
+#define NR_syscalls 317
 
 /*
  * user-visible error numbers are in the range -1 - -128: see
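vmsplice gets syscall number 316 on i386 and NR_syscalls is bumped to 317. Until a libc wrapper exists, userspace can invoke it by number; the sketch below assumes the standard vmsplice argument order (fd, iovec array, segment count, flags) and is illustrative rather than authoritative:

/* Hedged userspace sketch: calling the new syscall by number. */
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_vmsplice
#define __NR_vmsplice 316               /* i386 value from this header */
#endif

static long my_vmsplice(int fd, const struct iovec *iov,
                        unsigned long nr_segs, unsigned int flags)
{
        return syscall(__NR_vmsplice, fd, iov, nr_segs, flags);
}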