aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2011-05-23 04:24:39 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2011-05-23 04:24:31 -0400
commit2d42552d1c1659b014851cf449ad2fe458509128 (patch)
treeb9ef22867ce52e23b5249a7ad38637eec40363b8
parentc26001d4e9133fe45e47eee18cfd826219e71fb9 (diff)
[S390] merge page_test_dirty and page_clear_dirty
The page_clear_dirty primitive always sets the default storage key which resets the access control bits and the fetch protection bit. That will surprise a KVM guest that sets non-zero access control bits or the fetch protection bit. Merge page_test_dirty and page_clear_dirty back to a single function and only clear the dirty bit from the storage key. In addition move the function page_test_and_clear_dirty and page_test_and_clear_young to page.h where they belong. This requires to change the parameter from a struct page * to a page frame number. Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/include/asm/page.h56
-rw-r--r--arch/s390/include/asm/pgtable.h58
-rw-r--r--include/asm-generic/pgtable.h12
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--mm/rmap.c11
5 files changed, 68 insertions, 71 deletions
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3c987e9ec8d..81ee2776088 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,8 +107,8 @@ typedef pte_t *pgtable_t;
107#define __pgd(x) ((pgd_t) { (x) } ) 107#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 108#define __pgprot(x) ((pgprot_t) { (x) } )
109 109
110static inline void 110static inline void page_set_storage_key(unsigned long addr,
111page_set_storage_key(unsigned long addr, unsigned int skey, int mapped) 111 unsigned char skey, int mapped)
112{ 112{
113 if (!mapped) 113 if (!mapped)
114 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" 114 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +117,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
117 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 117 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
118} 118}
119 119
120static inline unsigned int 120static inline unsigned char page_get_storage_key(unsigned long addr)
121page_get_storage_key(unsigned long addr)
122{ 121{
123 unsigned int skey; 122 unsigned char skey;
124 123
125 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0)); 124 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
126 return skey; 125 return skey;
127} 126}
128 127
128static inline int page_reset_referenced(unsigned long addr)
129{
130 unsigned int ipm;
131
132 asm volatile(
133 " rrbe 0,%1\n"
134 " ipm %0\n"
135 : "=d" (ipm) : "a" (addr) : "cc");
136 return !!(ipm & 0x20000000);
137}
138
139/* Bits in the storage key */
140#define _PAGE_CHANGED 0x02 /* HW changed bit */
141#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
142#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
143#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
144
145/*
146 * Test and clear dirty bit in storage key.
147 * We can't clear the changed bit atomically. This is a potential
148 * race against modification of the referenced bit. This function
149 * should therefore only be called if it is not mapped in any
150 * address space.
151 */
152#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
153static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
154{
155 unsigned char skey;
156
157 skey = page_get_storage_key(pfn << PAGE_SHIFT);
158 if (!(skey & _PAGE_CHANGED))
159 return 0;
160 page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
161 return 1;
162}
163
164/*
165 * Test and clear referenced bit in storage key.
166 */
167#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
168static inline int page_test_and_clear_young(unsigned long pfn)
169{
170 return page_reset_referenced(pfn << PAGE_SHIFT);
171}
172
129struct page; 173struct page;
130void arch_free_page(struct page *page, int order); 174void arch_free_page(struct page *page, int order);
131void arch_alloc_page(struct page *page, int order); 175void arch_alloc_page(struct page *page, int order);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 763620ec792..4ca4dd2b329 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -373,10 +373,6 @@ extern unsigned long VMALLOC_START;
373#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 373#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
374 _ASCE_ALT_EVENT) 374 _ASCE_ALT_EVENT)
375 375
376/* Bits in the storage key */
377#define _PAGE_CHANGED 0x02 /* HW changed bit */
378#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
379
380/* 376/*
381 * Page protection definitions. 377 * Page protection definitions.
382 */ 378 */
@@ -555,8 +551,6 @@ static inline void rcp_unlock(pte_t *ptep)
555#endif 551#endif
556} 552}
557 553
558/* forward declaration for SetPageUptodate in page-flags.h*/
559static inline void page_clear_dirty(struct page *page, int mapped);
560#include <linux/page-flags.h> 554#include <linux/page-flags.h>
561 555
562static inline void ptep_rcp_copy(pte_t *ptep) 556static inline void ptep_rcp_copy(pte_t *ptep)
@@ -566,7 +560,7 @@ static inline void ptep_rcp_copy(pte_t *ptep)
566 unsigned int skey; 560 unsigned int skey;
567 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 561 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
568 562
569 skey = page_get_storage_key(page_to_phys(page)); 563 skey = page_get_storage_key(pte_val(*ptep) >> PAGE_SHIFT);
570 if (skey & _PAGE_CHANGED) { 564 if (skey & _PAGE_CHANGED) {
571 set_bit_simple(RCP_GC_BIT, pgste); 565 set_bit_simple(RCP_GC_BIT, pgste);
572 set_bit_simple(KVM_UD_BIT, pgste); 566 set_bit_simple(KVM_UD_BIT, pgste);
@@ -760,6 +754,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
760{ 754{
761 int dirty; 755 int dirty;
762 unsigned long *pgste; 756 unsigned long *pgste;
757 unsigned long pfn;
763 struct page *page; 758 struct page *page;
764 unsigned int skey; 759 unsigned int skey;
765 760
@@ -767,8 +762,9 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
767 return -EINVAL; 762 return -EINVAL;
768 rcp_lock(ptep); 763 rcp_lock(ptep);
769 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 764 pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
770 page = virt_to_page(pte_val(*ptep)); 765 pfn = pte_val(*ptep) >> PAGE_SHIFT;
771 skey = page_get_storage_key(page_to_phys(page)); 766 page = pfn_to_page(pfn);
767 skey = page_get_storage_key(pfn);
772 if (skey & _PAGE_CHANGED) { 768 if (skey & _PAGE_CHANGED) {
773 set_bit_simple(RCP_GC_BIT, pgste); 769 set_bit_simple(RCP_GC_BIT, pgste);
774 set_bit_simple(KVM_UD_BIT, pgste); 770 set_bit_simple(KVM_UD_BIT, pgste);
@@ -779,7 +775,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
779 } 775 }
780 dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste); 776 dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
781 if (skey & _PAGE_CHANGED) 777 if (skey & _PAGE_CHANGED)
782 page_clear_dirty(page, 1); 778 page_set_storage_key(pfn, skey & ~_PAGE_CHANGED, 1);
783 rcp_unlock(ptep); 779 rcp_unlock(ptep);
784 return dirty; 780 return dirty;
785} 781}
@@ -790,16 +786,16 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
790 unsigned long addr, pte_t *ptep) 786 unsigned long addr, pte_t *ptep)
791{ 787{
792#ifdef CONFIG_PGSTE 788#ifdef CONFIG_PGSTE
793 unsigned long physpage; 789 unsigned long pfn;
794 int young; 790 int young;
795 unsigned long *pgste; 791 unsigned long *pgste;
796 792
797 if (!vma->vm_mm->context.has_pgste) 793 if (!vma->vm_mm->context.has_pgste)
798 return 0; 794 return 0;
799 physpage = pte_val(*ptep) & PAGE_MASK; 795 pfn = pte_val(*ptep) >> PAGE_SHIFT;
800 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 796 pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
801 797
802 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); 798 young = ((page_get_storage_key(pfn) & _PAGE_REFERENCED) != 0);
803 rcp_lock(ptep); 799 rcp_lock(ptep);
804 if (young) 800 if (young)
805 set_bit_simple(RCP_GR_BIT, pgste); 801 set_bit_simple(RCP_GR_BIT, pgste);
@@ -937,42 +933,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
937}) 933})
938 934
939/* 935/*
940 * Test and clear dirty bit in storage key.
941 * We can't clear the changed bit atomically. This is a potential
942 * race against modification of the referenced bit. This function
943 * should therefore only be called if it is not mapped in any
944 * address space.
945 */
946#define __HAVE_ARCH_PAGE_TEST_DIRTY
947static inline int page_test_dirty(struct page *page)
948{
949 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
950}
951
952#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
953static inline void page_clear_dirty(struct page *page, int mapped)
954{
955 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
956}
957
958/*
959 * Test and clear referenced bit in storage key.
960 */
961#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
962static inline int page_test_and_clear_young(struct page *page)
963{
964 unsigned long physpage = page_to_phys(page);
965 int ccode;
966
967 asm volatile(
968 " rrbe 0,%1\n"
969 " ipm %0\n"
970 " srl %0,28\n"
971 : "=d" (ccode) : "a" (physpage) : "cc" );
972 return ccode & 2;
973}
974
975/*
976 * Conversion functions: convert a page and protection to a page entry, 936 * Conversion functions: convert a page and protection to a page entry,
977 * and a page entry and page directory to the page they refer to. 937 * and a page entry and page directory to the page they refer to.
978 */ 938 */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b4bfe338ea0..e9b8e5926be 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -184,22 +184,18 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
185#endif 185#endif
186 186
187#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY 187#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
188#define page_test_dirty(page) (0) 188#define page_test_and_clear_dirty(pfn, mapped) (0)
189#endif 189#endif
190 190
191#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY 191#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
192#define page_clear_dirty(page, mapped) do { } while (0)
193#endif
194
195#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
196#define pte_maybe_dirty(pte) pte_dirty(pte) 192#define pte_maybe_dirty(pte) pte_dirty(pte)
197#else 193#else
198#define pte_maybe_dirty(pte) (1) 194#define pte_maybe_dirty(pte) (1)
199#endif 195#endif
200 196
201#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 197#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
202#define page_test_and_clear_young(page) (0) 198#define page_test_and_clear_young(pfn) (0)
203#endif 199#endif
204 200
205#ifndef __HAVE_ARCH_PGD_OFFSET_GATE 201#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 811183de1ef..79a6700b716 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -308,7 +308,7 @@ static inline void SetPageUptodate(struct page *page)
308{ 308{
309#ifdef CONFIG_S390 309#ifdef CONFIG_S390
310 if (!test_and_set_bit(PG_uptodate, &page->flags)) 310 if (!test_and_set_bit(PG_uptodate, &page->flags))
311 page_clear_dirty(page, 0); 311 page_set_storage_key(page_to_pfn(page), PAGE_DEFAULT_KEY, 0);
312#else 312#else
313 /* 313 /*
314 * Memory barrier must be issued before setting the PG_uptodate bit, 314 * Memory barrier must be issued before setting the PG_uptodate bit,
diff --git a/mm/rmap.c b/mm/rmap.c
index 8da044a1db0..522e4a93cad 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -719,7 +719,7 @@ int page_referenced(struct page *page,
719 unlock_page(page); 719 unlock_page(page);
720 } 720 }
721out: 721out:
722 if (page_test_and_clear_young(page)) 722 if (page_test_and_clear_young(page_to_pfn(page)))
723 referenced++; 723 referenced++;
724 724
725 return referenced; 725 return referenced;
@@ -785,10 +785,8 @@ int page_mkclean(struct page *page)
785 struct address_space *mapping = page_mapping(page); 785 struct address_space *mapping = page_mapping(page);
786 if (mapping) { 786 if (mapping) {
787 ret = page_mkclean_file(mapping, page); 787 ret = page_mkclean_file(mapping, page);
788 if (page_test_dirty(page)) { 788 if (page_test_and_clear_dirty(page_to_pfn(page), 1))
789 page_clear_dirty(page, 1);
790 ret = 1; 789 ret = 1;
791 }
792 } 790 }
793 } 791 }
794 792
@@ -981,10 +979,9 @@ void page_remove_rmap(struct page *page)
981 * not if it's in swapcache - there might be another pte slot 979 * not if it's in swapcache - there might be another pte slot
982 * containing the swap entry, but page not yet written to swap. 980 * containing the swap entry, but page not yet written to swap.
983 */ 981 */
984 if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { 982 if ((!PageAnon(page) || PageSwapCache(page)) &&
985 page_clear_dirty(page, 1); 983 page_test_and_clear_dirty(page_to_pfn(page), 1))
986 set_page_dirty(page); 984 set_page_dirty(page);
987 }
988 /* 985 /*
989 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED 986 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
990 * and not charged by memcg for now. 987 * and not charged by memcg for now.