author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2013-07-23 16:11:42 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2013-08-29 07:20:11 -0400
commit     0944fe3f4a323f436180d39402cae7f9c46ead17 (patch)
tree       7b2ada69ff7e3c1fae20ec0b1dffe5e0d0ec2cc6 /arch/s390/mm
parent     fbd70035fb2b03deb346052084794bc1d0e25aa2 (diff)
s390/mm: implement software referenced bits
The last remaining use for the storage key of the s390 architecture
is reference counting. The alternative is to make page table entries
invalid while they are old. On access, the fault handler marks the
pte/pmd as young, which makes the pte/pmd valid if the access rights
allow read access. The pte/pmd invalidations required for software-
managed referenced bits cost a bit of performance; on the other hand,
the RRBE/RRBM instructions to read and reset the referenced bits are
quite expensive as well.
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
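The scheme described in the message can be pictured with a small userspace model. The sketch below is only an illustration, not the kernel code: the bit values and the names PTE_VALID, PTE_READ, PTE_YOUNG, pte_mkold_sw() and fault_mkyoung_sw() are invented for the example. Clearing the software referenced bit also invalidates the entry; the next allowed access takes a fault that marks the entry young and valid again, so a plain memory access records the reference without touching a storage key.

/*
 * Minimal userspace sketch of software managed referenced bits.
 * PTE_VALID, PTE_READ and PTE_YOUNG are invented for illustration;
 * they are not the s390 _PAGE_* definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PTE_VALID 0x1UL /* entry may be used by hardware (assumed) */
#define PTE_READ  0x2UL /* read access allowed (assumed)           */
#define PTE_YOUNG 0x4UL /* software referenced bit (assumed)       */

typedef unsigned long pte_t;

/* Clearing the referenced bit also makes the entry invalid. */
static pte_t pte_mkold_sw(pte_t pte)
{
        return pte & ~(PTE_YOUNG | PTE_VALID);
}

/* Fault path: an access that the rights allow marks the entry young
 * and valid again, recording the reference without a storage key. */
static bool fault_mkyoung_sw(pte_t *pte)
{
        if (!(*pte & PTE_READ))
                return false;           /* genuine protection fault */
        *pte |= PTE_YOUNG | PTE_VALID;  /* reference recorded */
        return true;
}

int main(void)
{
        pte_t pte = PTE_VALID | PTE_READ | PTE_YOUNG;

        pte = pte_mkold_sw(pte);        /* e.g. a page-aging scan */
        printf("after clear:  young=%d valid=%d\n",
               !!(pte & PTE_YOUNG), !!(pte & PTE_VALID));
        fault_mkyoung_sw(&pte);         /* next access takes a fault */
        printf("after access: young=%d valid=%d\n",
               !!(pte & PTE_YOUNG), !!(pte & PTE_VALID));
        return 0;
}

This mirrors the trade-off weighed above: an occasional extra fault plus invalidation instead of RRBE/RRBM on every scan.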
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--   arch/s390/mm/hugetlbpage.c   58
-rw-r--r--   arch/s390/mm/pgtable.c       18
-rw-r--r--   arch/s390/mm/vmem.c           1
3 files changed, 47 insertions, 30 deletions
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index b0bd0ae17796..248445f92604 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -10,19 +10,25 @@
 
 static inline pmd_t __pte_to_pmd(pte_t pte)
 {
-	int none, prot;
+	int none, young, prot;
 	pmd_t pmd;
 
 	/*
 	 * Convert encoding           pte bits      pmd bits
-	 *                            .IR.....wdtp  ..R...I.....
-	 * empty                      .10.....0000 -> ..0...1.....
-	 * prot-none, clean           .11.....0001 -> ..1...1.....
-	 * prot-none, dirty           .10.....0101 -> ..1...1.....
-	 * read-only, clean           .01.....0001 -> ..1...0.....
-	 * read-only, dirty           .01.....0101 -> ..1...0.....
-	 * read-write, clean          .01.....1001 -> ..0...0.....
-	 * read-write, dirty          .00.....1101 -> ..0...0.....
+	 *                            .IR...wrdytp  ..R...I...y.
+	 * empty                      .10...000000 -> ..0...1...0.
+	 * prot-none, clean, old      .11...000001 -> ..0...1...1.
+	 * prot-none, clean, young    .11...000101 -> ..1...1...1.
+	 * prot-none, dirty, old      .10...001001 -> ..0...1...1.
+	 * prot-none, dirty, young    .10...001101 -> ..1...1...1.
+	 * read-only, clean, old      .11...010001 -> ..1...1...0.
+	 * read-only, clean, young    .01...010101 -> ..1...0...1.
+	 * read-only, dirty, old      .11...011001 -> ..1...1...0.
+	 * read-only, dirty, young    .01...011101 -> ..1...0...1.
+	 * read-write, clean, old     .11...110001 -> ..0...1...0.
+	 * read-write, clean, young   .01...110101 -> ..0...0...1.
+	 * read-write, dirty, old     .10...111001 -> ..0...1...0.
+	 * read-write, dirty, young   .00...111101 -> ..0...0...1.
 	 * Huge ptes are dirty by definition, a clean pte is made dirty
 	 * by the conversion.
 	 */
@@ -31,9 +37,14 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
 		if (pte_val(pte) & _PAGE_INVALID)
 			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 		none = (pte_val(pte) & _PAGE_PRESENT) &&
-			(pte_val(pte) & _PAGE_INVALID);
-		prot = (pte_val(pte) & _PAGE_PROTECT);
-		if (prot || none)
+			!(pte_val(pte) & _PAGE_READ) &&
+			!(pte_val(pte) & _PAGE_WRITE);
+		prot = (pte_val(pte) & _PAGE_PROTECT) &&
+			!(pte_val(pte) & _PAGE_WRITE);
+		young = pte_val(pte) & _PAGE_YOUNG;
+		if (none || young)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+		if (prot || (none && young))
 			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	} else
 		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
@@ -46,11 +57,14 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 
 	/*
 	 * Convert encoding           pmd bits      pte bits
-	 *                            ..R...I.....  .IR.....wdtp
-	 * empty                      ..0...1..... -> .10.....0000
-	 * prot-none, young           ..1...1..... -> .10.....0101
-	 * read-only, young           ..1...0..... -> .01.....0101
-	 * read-write, young          ..0...0..... -> .00.....1101
+	 *                            ..R...I...y.  .IR...wrdytp
+	 * empty                      ..0...1...0. -> .10...000000
+	 * prot-none, old             ..0...1...1. -> .10...001001
+	 * prot-none, young           ..1...1...1. -> .10...001101
+	 * read-only, old             ..1...1...0. -> .11...011001
+	 * read-only, young           ..1...0...1. -> .01...011101
+	 * read-write, old            ..0...1...0. -> .10...111001
+	 * read-write, young          ..0...0...1. -> .00...111101
 	 * Huge ptes are dirty by definition
 	 */
 	if (pmd_present(pmd)) {
@@ -58,11 +72,17 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 			(pmd_val(pmd) & PAGE_MASK);
 		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
 			pte_val(pte) |= _PAGE_INVALID;
-		else {
+		if (pmd_prot_none(pmd)) {
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_YOUNG;
+		} else {
+			pte_val(pte) |= _PAGE_READ;
 			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
 				pte_val(pte) |= _PAGE_PROTECT;
 			else
 				pte_val(pte) |= _PAGE_WRITE;
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
+				pte_val(pte) |= _PAGE_YOUNG;
 		}
 	} else
 		pte_val(pte) = _PAGE_INVALID;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index befaea7003f7..6d16132d0850 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -754,7 +754,8 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 	atomic_set(&page->_mapcount, 3);
 	table = (unsigned long *) page_to_phys(page);
 	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
+		    PAGE_SIZE/2);
 	return table;
 }
 
@@ -792,26 +793,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		unsigned long address, bits;
-		unsigned char skey;
+		unsigned long address, bits, skey;
 
 		address = pte_val(*ptep) & PAGE_MASK;
-		skey = page_get_storage_key(address);
+		skey = (unsigned long) page_get_storage_key(address);
 		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 		/* Set storage key ACC and FP */
-		page_set_storage_key(address,
-				     (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
-				     !nq);
-
+		page_set_storage_key(address, skey, !nq);
 		/* Merge host changed & referenced into pgste */
 		pgste_val(new) |= bits << 52;
-		/* Transfer skey changed & referenced bit to kvm user bits */
-		pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
 	}
 	/* changing the guest storage key is considered a change of the page */
 	if ((pgste_val(new) ^ pgste_val(old)) &
 	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
-		pgste_val(new) |= PGSTE_UC_BIT;
+		pgste_val(new) |= PGSTE_HC_BIT;
 
 	pgste_set_unlock(ptep, new);
 	pte_unmap_unlock(*ptep, ptl);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index e1299d40818d..bcfb70b60be6 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -118,6 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
 				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+				_SEGMENT_ENTRY_YOUNG |
 				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
 			address += PMD_SIZE;
 			continue;
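The vmem.c hunk can be read the same way: with referenced bits managed in software, a mapping that should be usable from the start is created with the young bit already set. The sketch below composes such a large-page segment entry in a standalone toy model; the SEG_* names and bit values are invented for illustration and are not the s390 _SEGMENT_ENTRY_* definitions.

/*
 * Toy model of composing a large-page segment entry as in the
 * vmem_add_mem() change above.  SEG_* values are invented.
 */
#include <stdio.h>

#define SEG_ENTRY   0x001UL /* hypothetical: entry present          */
#define SEG_LARGE   0x002UL /* hypothetical: large-page mapping     */
#define SEG_YOUNG   0x004UL /* hypothetical software referenced bit */
#define SEG_PROTECT 0x008UL /* hypothetical read-only protection    */

/* Mirror of the changed assignment: the mapping is created young. */
static unsigned long make_large_segment(unsigned long pa, int ro)
{
        return pa | SEG_ENTRY | SEG_LARGE | SEG_YOUNG |
               (ro ? SEG_PROTECT : 0);
}

int main(void)
{
        printf("rw mapping: %#lx\n", make_large_segment(0x200000UL, 0));
        printf("ro mapping: %#lx\n", make_large_segment(0x300000UL, 1));
        return 0;
}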