Diffstat (limited to 'include/asm-i386/pgtable.h')
 -rw-r--r--   include/asm-i386/pgtable.h | 47
 1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 09697fec3d2b..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
+do {									\
+	if (dirty) {							\
+		(ptep)->pte_low = (entry).pte_low;			\
+		flush_tlb_page(vma, address);				\
+	}								\
+} while (0)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_dirty(*ptep))
@@ -253,6 +270,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
@@ -260,6 +278,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
@@ -272,6 +291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 	return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -364,11 +384,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_index(address) \
 		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 
 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
-#define pmd_page_kernel(pmd) \
+#define pmd_page_vaddr(pmd) \
 		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /*
@@ -391,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address);
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 #endif
 
-extern void noexec_setup(const char *str);
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
 	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
@@ -411,23 +429,8 @@ extern void noexec_setup(const char *str);
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
  */
 #define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do {									\
-	if (__dirty) {							\
-		(__ptep)->pte_low = (__entry).pte_low;			\
-		flush_tlb_page(__vma, __address);			\
-	}								\
-} while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
@@ -441,12 +444,6 @@ extern void noexec_setup(const char *str);
 #define GET_IOSPACE(pfn) 0
 #define GET_PFN(pfn) (pfn)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
 
 #endif /* _I386_PGTABLE_H */
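
Note: each __HAVE_ARCH_* define is moved next to the function it covers because <asm-generic/pgtable.h>, included at the end of this header, only supplies a generic fallback when the corresponding __HAVE_ARCH_* macro has not been defined first. A minimal sketch of that guard pattern, assuming the generic ptep_set_access_flags fallback is built on set_pte_at() and flush_tlb_page() (illustrative only, not the verbatim generic header):

/*
 * Sketch of the override pattern consumed by <asm-generic/pgtable.h>:
 * the fallback is compiled only if the architecture header, processed
 * first, did not claim the hook. Simplified, not the exact generic code.
 */
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									   \
	/* Generic fallback: unconditionally rewrite the pte and flush. */ \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	   \
	flush_tlb_page(__vma, __address);				   \
} while (0)
#endif

With the i386 definition above in scope, the #ifndef guard skips this fallback, so the lighter pte_low store and flush (performed only when the dirty bit actually changes, per the comment carried in the patch) is used instead.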
