Diffstat (limited to 'include/asm-s390/pgtable.h')
 -rw-r--r--  include/asm-s390/pgtable.h | 99
 1 file changed, 71 insertions(+), 28 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc31..b424ab21f8bd 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -424,7 +424,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
  * within a page table are directly modified. Thus, the following
  * hook is made available.
  */
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *pteptr, pte_t pteval)
 {
 	pte_t *shadow_pte = get_shadow_pte(pteptr);
 
@@ -437,7 +438,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
 	}
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
  * pgd/pmd/pte query functions
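With the wrapper macro removed, set_pte_at() becomes the real s390 primitive, so the mm and address arguments from generic code actually reach the architecture instead of being discarded. A minimal sketch of a generic-code caller, assuming the usual kernel helpers (the function name install_pte_sketch is illustrative):

#include <linux/mm.h>

/* Illustrative caller: with set_pte_at() as a real function, the mm
 * and address are passed through to the architecture instead of being
 * dropped by the old set_pte_at(mm,addr,ptep,pteval) -> set_pte() macro.
 */
static void install_pte_sketch(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, struct page *page, pgprot_t prot)
{
	pte_t entry = mk_pte(page, prot);	/* build the pte value */

	set_pte_at(mm, addr, ptep, entry);	/* arch hook now sees mm + addr */
}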
@@ -508,7 +508,8 @@ static inline int pte_file(pte_t pte)
 	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
 }
 
+#define __HAVE_ARCH_PTE_SAME
 #define pte_same(a,b)	(pte_val(a) == pte_val(b))
 
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
@@ -663,24 +664,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
 
-static inline int
-ptep_clear_flush_young(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
 {
 	/* No need to flush TLB; bits are in storage key */
-	return ptep_test_and_clear_young(vma, address, ptep);
-}
-
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	pte_clear(mm, addr, ptep);
-	return pte;
+	return 0;
 }
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +705,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
 	__ptep_ipte(address, ptep);
 }
 
+/*
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ *   1) ptep_get_and_clear
+ *   2) set_pte_at
+ *   3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep)			\
+({									\
+	pte_t __pte = *(__ptep);					\
+	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
+	    (__mm) != current->active_mm)				\
+		ptep_invalidate(__address, __ptep);			\
+	else								\
+		pte_clear((__mm), (__address), (__ptep));		\
+	__pte;								\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
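The comment above is best read against the generic caller it describes. A hedged sketch of the change_pte_range() calling pattern, simplified from mm/mprotect.c of this era (loop bounds and the prot argument are illustrative):

#include <linux/mm.h>

/* Simplified shape of the generic change_pte_range() loop: the pte is
 * re-installed between steps 1 and 3, so an architecture that must
 * flush together with the modification has to flush inside step 1.
 */
static void change_pte_range_sketch(struct mm_struct *mm, pte_t *ptep,
				    unsigned long addr, unsigned long end,
				    pgprot_t newprot)
{
	do {
		if (pte_present(*ptep)) {
			pte_t ptent;

			ptent = ptep_get_and_clear(mm, addr, ptep); /* 1) flushes on s390 */
			ptent = pte_modify(ptent, newprot);
			set_pte_at(mm, addr, ptep, ptent);          /* 2) */
		}
	} while (ptep++, addr += PAGE_SIZE, addr != end);
	/* 3) flush_tlb_range() follows in the caller - a nop on s390 */
}

The mm_users/active_mm test in the macro is the matching optimization: if the mm is attached only to the current task, no other CPU can hold a TLB entry that would require the expensive IPTE, so a plain pte_clear() suffices.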
@@ -717,12 +739,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	return pte;
 }
 
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+/*
+ * The batched pte unmap code uses ptep_get_and_clear_full to clear the
+ * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
+ * tlbs of an mm if it can guarantee that the ptes of the mm_struct
+ * cannot be accessed while the batched unmap is running. In this case
+ * full==1 and a simple pte_clear is enough. See tlb.h.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr,
+					    pte_t *ptep, int full)
 {
-	pte_t old_pte = *ptep;
-	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+	pte_t pte = *ptep;
+
+	if (full)
+		pte_clear(mm, addr, ptep);
+	else
+		ptep_invalidate(addr, ptep);
+	return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
+({									\
+	pte_t __pte = *(__ptep);					\
+	if (pte_write(__pte)) {						\
+		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
+		    (__mm) != current->active_mm)			\
+			ptep_invalidate(__addr, __ptep);		\
+		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
+	}								\
+})
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
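The full==1 fast path depends on the caller having already quiesced the address space. A hedged sketch of the consumer side, modeled on the batched unmap loop in mm/memory.c and assuming an mmu_gather that exposes mm and fullmm fields as the generic tlb.h does (the function name is illustrative):

#include <linux/mm.h>
#include <asm/tlb.h>

/* Sketch of the batched unmap consumer: tlb_gather_mmu() sets fullmm
 * when the whole mm is being torn down and nothing can touch the ptes,
 * so ptep_get_and_clear_full() may take the cheap pte_clear() branch.
 */
static void zap_pte_range_sketch(struct mmu_gather *tlb, pte_t *ptep,
				 unsigned long addr, unsigned long end)
{
	int full = tlb->fullmm;		/* decided once in tlb_gather_mmu() */

	do {
		if (pte_present(*ptep))
			/* real code propagates dirty/young from the result */
			ptep_get_and_clear_full(tlb->mm, addr, ptep, full);
	} while (ptep++, addr += PAGE_SIZE, addr != end);
}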
@@ -740,11 +790,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
 static inline int page_test_dirty(struct page *page)
 {
 	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
 }
 
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
 static inline void page_clear_dirty(struct page *page)
 {
 	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
@@ -753,6 +805,7 @@ static inline void page_clear_dirty(struct page *page)
 /*
  * Test and clear referenced bit in storage key.
  */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 static inline int page_test_and_clear_young(struct page *page)
 {
 	unsigned long physpage = page_to_phys(page);
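Because s390 keeps the dirty and referenced bits in the per-physical-page storage key rather than in the pte, generic code must harvest them through these hooks. A minimal sketch of how a caller might fold the storage-key dirty state back into struct page (the function name is hypothetical):

#include <linux/mm.h>

/* Hypothetical helper: move the storage-key changed bit into the
 * struct page dirty flag, the way reclaim uses page_test_dirty()
 * and page_clear_dirty() together.
 */
static int harvest_storage_key_dirty(struct page *page)
{
	int dirty = page_test_dirty(page);	/* reads the storage key */

	if (dirty) {
		page_clear_dirty(page);		/* reset the changed bit */
		set_page_dirty(page);		/* record it in struct page */
	}
	return dirty;
}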
@@ -930,16 +983,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
 #define __HAVE_ARCH_MEMMAP_INIT
 extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */