path: root/include/asm-i386/pgtable-3level.h
Diffstat (limited to 'include/asm-i386/pgtable-3level.h')
-rw-r--r--	include/asm-i386/pgtable-3level.h	69
1 files changed, 42 insertions, 27 deletions
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 7a2318f38303..eb0f1d7e96a1 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -42,20 +42,23 @@ static inline int pte_exec_kernel(pte_t pte)
 	return pte_x(pte);
 }
 
-#ifndef CONFIG_PARAVIRT
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte. In places where this is
  * not possible, use pte_get_and_clear to obtain the old pte
  * value and then use set_pte to update it. -ben
  */
-static inline void set_pte(pte_t *ptep, pte_t pte)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
 	ptep->pte_high = pte.pte_high;
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
+				     pte_t *ptep , pte_t pte)
+{
+	native_set_pte(ptep, pte);
+}
 
 /*
  * Since this is only called on user PTEs, and the page fault handler
@@ -63,7 +66,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
  * we are justified in merely clearing the PTE present bit, followed
  * by a set. The ordering here is important.
  */
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+					  pte_t *ptep, pte_t pte)
 {
 	ptep->pte_low = 0;
 	smp_wmb();
@@ -72,32 +76,48 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
 	ptep->pte_low = pte.pte_low;
 }
 
-#define set_pte_atomic(pteptr,pteval) \
-		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
-#define set_pmd(pmdptr,pmdval) \
-		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
-#define set_pud(pudptr,pudval) \
-		(*(pudptr) = (pudval))
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+}
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+}
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+	*pudp = pud;
+}
 
 /*
  * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
  * entry, so clear the bottom half first and enforce ordering with a compiler
  * barrier.
  */
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	ptep->pte_low = 0;
 	smp_wmb();
 	ptep->pte_high = 0;
 }
 
-static inline void pmd_clear(pmd_t *pmd)
+static inline void native_pmd_clear(pmd_t *pmd)
 {
 	u32 *tmp = (u32 *)pmd;
 	*tmp = 0;
 	smp_wmb();
 	*(tmp + 1) = 0;
 }
+
+#ifndef CONFIG_PARAVIRT
+#define set_pte(ptep, pte)			native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte)		native_set_pte_at(mm, addr, ptep, pte)
+#define set_pte_present(mm, addr, ptep, pte)	native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte)		native_set_pte_atomic(ptep, pte)
+#define set_pmd(pmdp, pmd)			native_set_pmd(pmdp, pmd)
+#define set_pud(pudp, pud)			native_set_pud(pudp, pud)
+#define pte_clear(mm, addr, ptep)		native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd)				native_pmd_clear(pmd)
 #endif
 
 /*
@@ -119,7 +139,8 @@ static inline void pud_clear (pud_t * pud) { }
 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
 			pmd_index(address))
 
-static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
+#ifdef CONFIG_SMP
+static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
 	pte_t res;
 
@@ -130,6 +151,9 @@ static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
 
 	return res;
 }
+#else
+#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
+#endif
 
 #define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t a, pte_t b)
@@ -146,28 +170,21 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return (pte.pte_low >> PAGE_SHIFT) |
-		(pte.pte_high << (32 - PAGE_SHIFT));
+	return pte_val(pte) >> PAGE_SHIFT;
 }
 
 extern unsigned long long __supported_pte_mask;
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	pte_t pte;
-
-	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-					(pgprot_val(pgprot) >> 32);
-	pte.pte_high &= (__supported_pte_mask >> 32);
-	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-							__supported_pte_mask;
-	return pte;
+	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
+		      pgprot_val(pgprot)) & __supported_pte_mask);
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
+	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
 			pgprot_val(pgprot)) & __supported_pte_mask);
 }
 
 /*
@@ -187,6 +204,4 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 
 #define __pmd_free_tlb(tlb, x)		do { } while (0)
 
-#define vmalloc_sync_all() ((void)0)
-
 #endif /* _I386_PGTABLE_3LEVEL_H */
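
Side note for readers of the patch: the invariant that native_set_pte() and native_pte_clear() preserve is that a half-written PAE entry must never be observable with its Present bit set, since on i386 the 64-bit entry is stored as two 32-bit words. The sketch below restates that discipline in stand-alone C. It is only an illustration, not kernel code: toy_pte, toy_set, toy_clear and the barrier stand-in toy_wmb() are invented names, and the real smp_wmb() may be stronger than a plain compiler barrier.

#include <stdint.h>

/* Toy stand-in for a PAE entry: two 32-bit halves, Present bit in pte_low. */
struct toy_pte {
	uint32_t pte_low;
	uint32_t pte_high;
};

/* Compiler barrier used as a stand-in for smp_wmb() in this sketch. */
#define toy_wmb() __asm__ __volatile__("" ::: "memory")

/* Set: publish pte_high first, so the entry is never seen with the
 * Present bit set but a stale high half. */
static void toy_set(volatile struct toy_pte *p, struct toy_pte v)
{
	p->pte_high = v.pte_high;
	toy_wmb();
	p->pte_low = v.pte_low;
}

/* Clear: drop pte_low (and with it the Present bit) first, then pte_high. */
static void toy_clear(volatile struct toy_pte *p)
{
	p->pte_low = 0;
	toy_wmb();
	p->pte_high = 0;
}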