Diffstat (limited to 'arch')

-rw-r--r--	arch/s390/include/asm/page.h	56
-rw-r--r--	arch/s390/include/asm/pgtable.h	58

2 files changed, 59 insertions, 55 deletions
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3c987e9ec8d..81ee2776088 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,8 +107,8 @@ typedef pte_t *pgtable_t;
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
-static inline void
-page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
+static inline void page_set_storage_key(unsigned long addr,
+					unsigned char skey, int mapped)
 {
 	if (!mapped)
 		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +117,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
 		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
 }
 
-static inline unsigned int
-page_get_storage_key(unsigned long addr)
+static inline unsigned char page_get_storage_key(unsigned long addr)
 {
-	unsigned int skey;
+	unsigned char skey;
 
-	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
+	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
 	return skey;
 }
 
+static inline int page_reset_referenced(unsigned long addr)
+{
+	unsigned int ipm;
+
+	asm volatile(
+		"	rrbe	0,%1\n"
+		"	ipm	%0\n"
+		: "=d" (ipm) : "a" (addr) : "cc");
+	return !!(ipm & 0x20000000);
+}
+
+/* Bits int the storage key */
+#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
+#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
+#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
+#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
+
+/*
+ * Test and clear dirty bit in storage key.
+ * We can't clear the changed bit atomically. This is a potential
+ * race against modification of the referenced bit. This function
+ * should therefore only be called if it is not mapped in any
+ * address space.
+ */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
+{
+	unsigned char skey;
+
+	skey = page_get_storage_key(pfn << PAGE_SHIFT);
+	if (!(skey & _PAGE_CHANGED))
+		return 0;
+	page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
+	return 1;
+}
+
+/*
+ * Test and clear referenced bit in storage key.
+ */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+static inline int page_test_and_clear_young(unsigned long pfn)
+{
+	return page_reset_referenced(pfn << PAGE_SHIFT);
+}
+
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
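The helpers added to page.h above take a page frame number rather than a struct page, replacing the struct page based primitives removed from pgtable.h below. As a rough illustration only, not part of this patch, a caller holding a struct page could translate it to a pfn first; the wrapper name in this sketch is made up for the example.

/*
 * Illustrative sketch only -- not part of this patch. Shows how a caller
 * with a struct page might drive the new pfn-based helpers from asm/page.h.
 */
#include <linux/mm.h>	/* page_to_pfn() */
#include <asm/page.h>	/* page_test_and_clear_dirty() */

static inline int example_test_and_clear_hw_dirty(struct page *page, int mapped)
{
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Returns 1 and clears the changed bit in the storage key if the
	 * page was dirty at the hardware level, 0 otherwise.
	 */
	return page_test_and_clear_dirty(pfn, mapped);
}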
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 763620ec792..4ca4dd2b329 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -373,10 +373,6 @@ extern unsigned long VMALLOC_START;
 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
 				 _ASCE_ALT_EVENT)
 
-/* Bits int the storage key */
-#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
-#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
-
 /*
  * Page protection definitions.
  */
@@ -555,8 +551,6 @@ static inline void rcp_unlock(pte_t *ptep)
 #endif
 }
 
-/* forward declaration for SetPageUptodate in page-flags.h*/
-static inline void page_clear_dirty(struct page *page, int mapped);
 #include <linux/page-flags.h>
 
 static inline void ptep_rcp_copy(pte_t *ptep)
@@ -566,7 +560,7 @@ static inline void ptep_rcp_copy(pte_t *ptep)
 	unsigned int skey;
 	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 
-	skey = page_get_storage_key(page_to_phys(page));
+	skey = page_get_storage_key(pte_val(*ptep) >> PAGE_SHIFT);
 	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
 		set_bit_simple(KVM_UD_BIT, pgste);
@@ -760,6 +754,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 {
 	int dirty;
 	unsigned long *pgste;
+	unsigned long pfn;
 	struct page *page;
 	unsigned int skey;
 
@@ -767,8 +762,9 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 		return -EINVAL;
 	rcp_lock(ptep);
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-	page = virt_to_page(pte_val(*ptep));
-	skey = page_get_storage_key(page_to_phys(page));
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
+	page = pfn_to_page(pfn);
+	skey = page_get_storage_key(pfn);
 	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
 		set_bit_simple(KVM_UD_BIT, pgste);
@@ -779,7 +775,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	}
 	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
 	if (skey & _PAGE_CHANGED)
-		page_clear_dirty(page, 1);
+		page_set_storage_key(pfn, skey & ~_PAGE_CHANGED, 1);
 	rcp_unlock(ptep);
 	return dirty;
 }
@@ -790,16 +786,16 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_PGSTE
-	unsigned long physpage;
+	unsigned long pfn;
 	int young;
 	unsigned long *pgste;
 
 	if (!vma->vm_mm->context.has_pgste)
 		return 0;
-	physpage = pte_val(*ptep) & PAGE_MASK;
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 
-	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+	young = ((page_get_storage_key(pfn) & _PAGE_REFERENCED) != 0);
 	rcp_lock(ptep);
 	if (young)
 		set_bit_simple(RCP_GR_BIT, pgste);
@@ -937,42 +933,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 })
 
 /*
- * Test and clear dirty bit in storage key.
- * We can't clear the changed bit atomically. This is a potential
- * race against modification of the referenced bit. This function
- * should therefore only be called if it is not mapped in any
- * address space.
- */
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-static inline int page_test_dirty(struct page *page)
-{
-	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
-}
-
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-static inline void page_clear_dirty(struct page *page, int mapped)
-{
-	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
-}
-
-/*
- * Test and clear referenced bit in storage key.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-static inline int page_test_and_clear_young(struct page *page)
-{
-	unsigned long physpage = page_to_phys(page);
-	int ccode;
-
-	asm volatile(
-		"	rrbe	0,%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode) : "a" (physpage) : "cc" );
-	return ccode & 2;
-}
-
-/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
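For reference, the _PAGE_* masks this patch consolidates in asm/page.h follow the s390 storage-key layout: access-control bits in the high nibble, then the fetch-protection, referenced and changed bits. A minimal sketch, not part of the patch, of decoding a key returned by page_get_storage_key(); the function name here is illustrative only.

/*
 * Illustrative sketch only -- not part of this patch. Decodes a storage
 * key using the _PAGE_* masks that the patch moves into asm/page.h.
 */
#include <linux/kernel.h>
#include <asm/page.h>

static void example_decode_storage_key(unsigned char skey)
{
	unsigned char acc = (skey & _PAGE_ACC_BITS) >> 4;	/* access-control bits */
	int fp  = !!(skey & _PAGE_FP_BIT);			/* fetch protection */
	int ref = !!(skey & _PAGE_REFERENCED);			/* HW referenced bit */
	int chg = !!(skey & _PAGE_CHANGED);			/* HW changed bit */

	pr_debug("acc=%u fp=%d ref=%d chg=%d\n", acc, fp, ref, chg);
}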