Diffstat (limited to 'include/asm-i386/pgtable.h')
 -rw-r--r--  include/asm-i386/pgtable.h | 97
 1 file changed, 61 insertions(+), 36 deletions(-)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index c3b58d473a55..e16359f81a40 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -159,6 +159,7 @@ void paging_init(void);
 
 extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_RO	(__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX	(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE	(__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_LARGE	(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -166,6 +167,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX		__pgprot(__PAGE_KERNEL_RX)
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
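
Together with __PAGE_KERNEL_RX above, this hunk gives the kernel a ready-made read-only-but-executable protection for its own text. A minimal sketch of how a caller could apply it, assuming the i386 change_page_attr()/global_flush_tlb() interface of this era (the helper name is hypothetical):

	#include <linux/mm.h>
	#include <asm/cacheflush.h>	/* change_page_attr(), global_flush_tlb() */
	#include <asm/pgtable.h>

	/* Hypothetical helper: remap a kernel text range executable but not writable. */
	static void set_text_rx(unsigned long start, unsigned long end)
	{
		int nr = (end - start) >> PAGE_SHIFT;

		change_page_attr(virt_to_page((void *)start), nr, PAGE_KERNEL_RX);
		global_flush_tlb();	/* drop stale writable TLB entries */
	}
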
@@ -241,6 +243,8 @@ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; re
 static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
 
+extern void vmalloc_sync_all(void);
+
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
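
vmalloc_sync_all() is being exported to non-arch code here: it forces the kernel part of every page directory into sync with the reference page tables, so vmalloc-area faults cannot happen afterwards. A hedged usage sketch, modeled on callers that install handlers which must not take a fault:

	/* Sketch: sync all pagetables before wiring up a handler (e.g. an
	 * NMI path) that may run where a vmalloc fault would be fatal. */
	static void example_install_handler(void)
	{
		vmalloc_sync_all();
		/* ... register the handler ... */
	}
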
@@ -263,9 +267,18 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#define paravirt_map_pt_hook(slot, va, pfn)	do { } while (0)
 #endif
 
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+	pte_t res = *ptep;
+
+	/* Pure native function needs no input for mm, addr */
+	native_pte_clear(NULL, 0, ptep);
+	return res;
+}
+
 /*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
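
The new native_local_ptep_get_and_clear() does a plain load-then-clear, which is only safe when no other CPU can update the pte concurrently. For contrast, a sketch of the locked counterpart it avoids (the real !PAE definition lives in pgtable-2level.h):

	/* Sketch: the atomic variant uses xchg so the read-and-clear is
	 * indivisible even against hardware A/D-bit updates from other CPUs. */
	static inline pte_t example_atomic_ptep_get_and_clear(pte_t *ptep)
	{
		return __pte(xchg(&ptep->pte_low, 0));
	}
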
@@ -283,12 +296,25 @@ do { \
 	}								\
 } while (0)
 
-/*
- * We don't actually have these, but we want to advertise them so that
- * we can encompass the flush here.
- */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define ptep_test_and_clear_dirty(vma, addr, ptep) ({			\
+	int ret = 0;							\
+	if (pte_dirty(*ptep))						\
+		ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); \
+	if (ret)							\
+		pte_update_defer(vma->vm_mm, addr, ptep);		\
+	ret;								\
+})
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
+	int ret = 0;							\
+	if (pte_young(*ptep))						\
+		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); \
+	if (ret)							\
+		pte_update_defer(vma->vm_mm, addr, ptep);		\
+	ret;								\
+})
 
 /*
  * Rules for using ptep_establish: the pte MUST be a user pte, and
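
Both new macros are GNU statement expressions: they test the bit non-atomically first, so a clean or old pte costs no locked operation, and they call pte_update_defer() only when a bit actually flipped, letting a hypervisor backend batch the update. An illustrative caller, in the style of reclaim's reference scanning:

	/* Illustrative: age one pte while scanning a VMA. */
	if (ptep_test_and_clear_young(vma, address, ptep))
		referenced++;
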
@@ -305,12 +331,9 @@ do { \
 #define ptep_clear_flush_dirty(vma, address, ptep)			\
 ({									\
 	int __dirty;							\
-	__dirty = pte_dirty(*(ptep));					\
-	if (__dirty) {							\
-		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+	__dirty = ptep_test_and_clear_dirty((vma), (address), (ptep));	\
+	if (__dirty)							\
 		flush_tlb_page(vma, address);				\
-	}								\
 	__dirty;							\
 })
 
@@ -318,19 +341,16 @@ do { \
 #define ptep_clear_flush_young(vma, address, ptep)			\
 ({									\
 	int __young;							\
-	__young = pte_young(*(ptep));					\
-	if (__young) {							\
-		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
+	if (__young)							\
 		flush_tlb_page(vma, address);				\
-	}								\
 	__young;							\
 })
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_t pte = raw_ptep_get_and_clear(ptep);
+	pte_t pte = native_ptep_get_and_clear(ptep);
 	pte_update(mm, addr, ptep);
 	return pte;
 }
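
With the helpers above in place, the two clear-and-flush macros reduce to "test-and-clear, then flush only if the bit was set". Hand-expanded, ptep_clear_flush_young() behaves like this sketch:

	static inline int example_clear_flush_young(struct vm_area_struct *vma,
						    unsigned long address, pte_t *ptep)
	{
		int young = ptep_test_and_clear_young(vma, address, ptep);

		if (young)
			flush_tlb_page(vma, address);	/* drop the stale TLB entry */
		return young;
	}
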
@@ -340,8 +360,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 {
 	pte_t pte;
 	if (full) {
-		pte = *ptep;
-		pte_clear(mm, addr, ptep);
+		/*
+		 * Full address destruction in progress; paravirt does not
+		 * care about updates and native needs no locking
+		 */
+		pte = native_local_ptep_get_and_clear(ptep);
 	} else {
 		pte = ptep_get_and_clear(mm, addr, ptep);
 	}
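
The full flag distinguishes teardown of an entire address space from an ordinary unmap; in the former case nothing else can observe the ptes, so the cheap non-atomic clear is safe. An illustrative caller, loosely modeled on the zap path (assumes the mmu_gather fullmm flag of this era):

	static void example_zap_pte(struct mmu_gather *tlb, struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
	{
		/* tlb->fullmm is set while the whole mm is being torn down */
		pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
		/* ... accumulate dirty/accessed state from ptent, free the page ... */
	}
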
@@ -470,24 +493,10 @@ extern pte_t *lookup_address(unsigned long address);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-({ \
-	pte_t *__ptep; \
-	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
-	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
-	paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
-	__ptep = __ptep + pte_index(address); \
-	__ptep; \
-})
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
 #define pte_offset_map_nested(dir, address) \
-({ \
-	pte_t *__ptep; \
-	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
-	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
-	paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
-	__ptep = __ptep + pte_index(address); \
-	__ptep; \
-})
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
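
With CONFIG_HIGHPTE the pte pages may live in highmem, so they must be temporarily mapped before use; the open-coded kmap_atomic()-plus-paravirt-hook pair collapses into kmap_atomic_pte(), which a hypervisor backend can override as a unit. Callers keep the usual map/read/unmap discipline, pairing the KM slot, as in this sketch:

	static pte_t example_read_pte(pmd_t *pmd, unsigned long address)
	{
		/* pte_offset_map() maps the (possibly highmem) pte page into
		 * the KM_PTE0 slot; pte_unmap() must release that same slot. */
		pte_t *ptep = pte_offset_map(pmd, address);
		pte_t entry = *ptep;

		pte_unmap(ptep);
		return entry;
	}
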
@@ -510,6 +519,22 @@ do { \
  * tables contain all the necessary information.
  */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
+
+void native_pagetable_setup_start(pgd_t *base);
+void native_pagetable_setup_done(pgd_t *base);
+
+#ifndef CONFIG_PARAVIRT
+static inline void paravirt_pagetable_setup_start(pgd_t *base)
+{
+	native_pagetable_setup_start(base);
+}
+
+static inline void paravirt_pagetable_setup_done(pgd_t *base)
+{
+	native_pagetable_setup_done(base);
+}
+#endif	/* !CONFIG_PARAVIRT */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
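
The pagetable-setup hooks let a paravirt backend interpose on boot-time pagetable construction; without CONFIG_PARAVIRT the paravirt_* names compile straight through to the native functions. A sketch of the intended call site, loosely modeled on i386's pagetable_init():

	static void __init example_pagetable_init(void)
	{
		pgd_t *pgd_base = swapper_pg_dir;

		/* bracket construction of the kernel mappings so a hypervisor
		 * can fix up or pin the new pagetables */
		paravirt_pagetable_setup_start(pgd_base);
		/* ... establish fixed kernel mappings in pgd_base ... */
		paravirt_pagetable_setup_done(pgd_base);
	}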