Diffstat (limited to 'include/asm-x86/pgtable_32.h')
 include/asm-x86/pgtable_32.h | 291 +----------------------------------------
 1 file changed, 8 insertions(+), 283 deletions(-)
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index ed3e70d8d04b..21e70fbf1dae 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -25,20 +25,11 @@
 struct mm_struct;
 struct vm_area_struct;
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
 extern struct kmem_cache *pmd_cache;
-extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
 void check_pgt_cache(void);
 
-void pmd_ctor(struct kmem_cache *, void *);
-void pgtable_cache_init(void);
+static inline void pgtable_cache_init(void) {}
 void paging_init(void);
 
 
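The hunk above turns pgtable_cache_init() from an out-of-line function into an empty stub (the removed declarations presumably move to the shared x86 header in this series). A minimal caller sketch, assuming a hypothetical mm_init_example() that is not a kernel function, of why the empty static inline is the idiomatic shape for an optional arch hook:

#include <linux/init.h>
#include <asm/pgtable.h>

/* Generic setup code can call the hook unconditionally; where it is an
 * empty static inline, as on 32-bit x86 after this patch, the call
 * compiles away to nothing. */
static void __init mm_init_example(void)
{
	pgtable_cache_init();	/* no-op here after this change */
	paging_init();		/* still a real out-of-line function */
}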
@@ -58,9 +49,6 @@ void paging_init(void);
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS	0
-
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
 
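For the two macros this hunk keeps, a worked example under assumed defaults (non-PAE i386 with the common 3G/1G split, so PAGE_OFFSET = 0xC0000000, PGDIR_SHIFT = 22, PTRS_PER_PGD = 1024):

#include <assert.h>

int main(void)
{
	unsigned long user_pgd_ptrs = 0xC0000000UL >> 22;  /* USER_PGD_PTRS */

	assert(user_pgd_ptrs == 768);          /* pgd slots below PAGE_OFFSET */
	assert(1024 - user_pgd_ptrs == 256);   /* KERNEL_PGD_PTRS */
	return 0;
}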
@@ -85,113 +73,6 @@ void paging_init(void);
 #endif
 
 /*
- * _PAGE_PSE set in the page directory entry just means that
- * the page directory entry points directly to a 4MB-aligned block of
- * memory.
- */
-#define _PAGE_BIT_PRESENT	0
-#define _PAGE_BIT_RW		1
-#define _PAGE_BIT_USER		2
-#define _PAGE_BIT_PWT		3
-#define _PAGE_BIT_PCD		4
-#define _PAGE_BIT_ACCESSED	5
-#define _PAGE_BIT_DIRTY		6
-#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
-#define _PAGE_BIT_UNUSED2	10
-#define _PAGE_BIT_UNUSED3	11
-#define _PAGE_BIT_NX		63
-
-#define _PAGE_PRESENT	0x001
-#define _PAGE_RW	0x002
-#define _PAGE_USER	0x004
-#define _PAGE_PWT	0x008
-#define _PAGE_PCD	0x010
-#define _PAGE_ACCESSED	0x020
-#define _PAGE_DIRTY	0x040
-#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
-#define _PAGE_UNUSED1	0x200	/* available for programmer */
-#define _PAGE_UNUSED2	0x400
-#define _PAGE_UNUSED3	0x800
-
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
-				   pte_present gives true */
-#ifdef CONFIG_X86_PAE
-#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
-#else
-#define _PAGE_NX	0
-#endif
-
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE \
-	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED \
-	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-
-#define PAGE_SHARED_EXEC \
-	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC \
-	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC \
-	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY \
-	PAGE_COPY_NOEXEC
-#define PAGE_READONLY \
-	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC \
-	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define _PAGE_KERNEL \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define _PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX		__pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
-
-/*
- * The i386 can't do page protection for execute, and considers that
- * the same are read. Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_EXEC
-#define __P101	PAGE_READONLY_EXEC
-#define __P110	PAGE_COPY_EXEC
-#define __P111	PAGE_COPY_EXEC
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_EXEC
-#define __S101	PAGE_READONLY_EXEC
-#define __S110	PAGE_SHARED_EXEC
-#define __S111	PAGE_SHARED_EXEC
-
-/*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
  * done without a 'access_ok(VERIFY_WRITE,..)'
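The block removed above defined the PTE flag bits. As a quick illustration of how those low-word flags decode (values copied from the removed lines; standalone sketch, not kernel code):

#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004

int main(void)
{
	/* _PAGE_TABLE from the removed block: present|rw|user|accessed|dirty */
	unsigned long pte_low = 0x067;

	printf("present=%lu rw=%lu user=%lu\n",
	       pte_low & _PAGE_PRESENT,
	       (pte_low & _PAGE_RW) >> 1,
	       (pte_low & _PAGE_USER) >> 2);
	return 0;
}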
@@ -211,133 +92,12 @@ extern unsigned long pg0[];
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
-static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
-
-static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
 # include <asm/pgtable-2level.h>
 #endif
 
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces. It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush. The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
- */
-#define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#endif
-
-/* local pte updates need not use xchg for locking */
-static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
-{
-	pte_t res = *ptep;
-
-	/* Pure native function needs no input for mm, addr */
-	native_pte_clear(NULL, 0, ptep);
-	return res;
-}
-
-/*
- * We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
- */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-({									\
-	int __changed = !pte_same(*(ptep), entry);			\
-	if (__changed && dirty) {					\
-		(ptep)->pte_low = (entry).pte_low;			\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
-		flush_tlb_page(vma, address);				\
-	}								\
-	__changed;							\
-})
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_young(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
-						&(ptep)->pte_low);	\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)			\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
-	if (__young)							\
-		flush_tlb_page(vma, address);				\
-	__young;							\
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = native_ptep_get_and_clear(ptep);
-	pte_update(mm, addr, ptep);
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-{
-	pte_t pte;
-	if (full) {
-		/*
-		 * Full address destruction in progress; paravirt does not
-		 * care about updates and native needs no locking
-		 */
-		pte = native_local_ptep_get_and_clear(ptep);
-	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
-	}
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-	pte_update(mm, addr, ptep);
-}
-
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
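Most of what this hunk removes is the paravirt pte_update() machinery and the accessed/dirty-bit helpers. A sketch of the atomic pattern ptep_test_and_clear_young() relies on, using a GCC/Clang builtin in place of the kernel's test_and_clear_bit() (illustrative assumption, not the kernel implementation):

/* Clear the accessed bit (_PAGE_BIT_ACCESSED = 5) and report its old
 * value in one atomic step, so a concurrent hardware update of the
 * dirty bit in the same word cannot be lost. */
static int test_and_clear_young_sketch(unsigned long *pte_low)
{
	unsigned long old = __atomic_fetch_and(pte_low, ~(1UL << 5),
					       __ATOMIC_SEQ_CST);
	return (old >> 5) & 1;
}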
@@ -367,25 +127,6 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte.pte_low &= _PAGE_CHG_MASK;
-	pte.pte_low |= pgprot_val(newprot);
-#ifdef CONFIG_X86_PAE
-	/*
-	 * Chop off the NX bit (if present), and add the NX portion of
-	 * the newprot (if present):
-	 */
-	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
-					(__supported_pte_mask >> 32);
-#endif
-	return pte;
-}
-
-#define pmd_large(pmd) \
-((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
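The removed pte_modify() had one subtle step under PAE: NX is _PAGE_BIT_NX = 63 of the 64-bit PTE, i.e. bit 31 of pte_high. A simplified worked sketch (it omits the __supported_pte_mask filtering the real code applies):

#include <assert.h>

static unsigned int modify_pte_high(unsigned int pte_high,
				    unsigned long long newprot)
{
	pte_high &= ~(1u << (63 - 32));             /* chop off the old NX bit */
	pte_high |= (unsigned int)(newprot >> 32);  /* add NX from newprot */
	return pte_high;
}

int main(void)
{
	/* NX currently set; new protection grants execute: bit 31 clears. */
	assert(modify_pte_high(0x80000000u, 0ULL) == 0u);
	return 0;
}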
@@ -432,26 +173,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_page_vaddr(pmd) \
 		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address);
-
-/*
- * Make a given kernel text page executable/non-executable.
- * Returns the previous executability setting of that page (which
- * is used to restore the previous state). Used by the SMP bootup code.
- * NOTE: this is an __init function for security reasons.
- */
-#ifdef CONFIG_X86_PAE
- extern int set_kernel_exec(unsigned long vaddr, int enable);
-#else
- static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
-#endif
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
 	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
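The context kept at the end of this hunk is the CONFIG_HIGHPTE flavour of pte_offset_map(), where the PTE page itself may sit in highmem and is reached through a temporary atomic kmap. A hypothetical walker (pte_is_present() is not in this patch) showing the map/unmap pairing:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Check whether the PTE mapping 'address' under 'pmd' is present. With
 * CONFIG_HIGHPTE the PTE page is mapped via kmap_atomic_pte(KM_PTE0),
 * so the mapping must be dropped again before anything that might
 * sleep. */
static int pte_is_present(pmd_t *pmd, unsigned long address)
{
	pte_t *pte = pte_offset_map(pmd, address);
	int ret = pte_present(*pte);

	pte_unmap(pte);
	return ret;
}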
@@ -497,13 +218,17 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 
 #endif /* !__ASSEMBLY__ */
 
+/*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+ */
 #ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)	(1)
-#endif /* CONFIG_FLATMEM */
+#else
+#define kern_addr_valid(kaddr)	(0)
+#endif
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _I386_PGTABLE_H */
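Finally, the kern_addr_valid() hunk gives non-FLATMEM configurations a conservative (0) definition. A hypothetical caller sketch, modelled on how /dev/kcore-style readers use it (read_kernel_vaddr() is illustrative, not kernel code):

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/pgtable.h>

/* Refuse addresses the memory model cannot vouch for; on SPARSEMEM and
 * DISCONTIGMEM this now conservatively rejects every address rather
 * than leaving the predicate undefined. */
static int read_kernel_vaddr(void *buf, unsigned long addr, size_t len)
{
	if (!kern_addr_valid(addr))
		return -EFAULT;
	memcpy(buf, (void *)addr, len);
	return 0;
}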