about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDominik Dingel <dingel@linux.vnet.ibm.com>2014-01-14 09:02:11 -0500
committerChristian Borntraeger <borntraeger@de.ibm.com>2014-04-22 03:36:23 -0400
commit65eef33550f68e9a7f7d2dc64da94fb6cb85be2c (patch)
tree7a1d6447ee180fcf2f648040ac0b4439dbd908d5
parent0f689a33ad17845363acdc6d52783befd6ad116c (diff)
KVM: s390: Adding skey bit to mmu context
For lazy storage key handling, we need a mechanism to track if the process ever issued a storage key operation. This patch adds the basic infrastructure for making the storage key handling optional, but still leaves it enabled for now by default.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/pgtable.h41
3 files changed, 30 insertions, 14 deletions
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f77695a82f64..a5e656260a70 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,6 +16,8 @@ typedef struct {
16 unsigned long vdso_base; 16 unsigned long vdso_base;
17 /* The mmu context has extended page tables. */ 17 /* The mmu context has extended page tables. */
18 unsigned int has_pgste:1; 18 unsigned int has_pgste:1;
19 /* The mmu context uses storage keys. */
20 unsigned int use_skey:1;
19} mm_context_t; 21} mm_context_t;
20 22
21#define INIT_MM_CONTEXT(name) \ 23#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71be346d0e3c..05925ead0748 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
23 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 23 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
24#endif 24#endif
25 mm->context.has_pgste = 0; 25 mm->context.has_pgste = 0;
26 mm->context.use_skey = 1;
26 mm->context.asce_limit = STACK_TOP_MAX; 27 mm->context.asce_limit = STACK_TOP_MAX;
27 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 28 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
28 return 0; 29 return 0;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 12f75313e086..e88e9f6b07cc 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -466,6 +466,16 @@ static inline int mm_has_pgste(struct mm_struct *mm)
466#endif 466#endif
467 return 0; 467 return 0;
468} 468}
469
470static inline int mm_use_skey(struct mm_struct *mm)
471{
472#ifdef CONFIG_PGSTE
473 if (mm->context.use_skey)
474 return 1;
475#endif
476 return 0;
477}
478
469/* 479/*
470 * pgd/pmd/pte query functions 480 * pgd/pmd/pte query functions
471 */ 481 */
@@ -699,12 +709,13 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
699#endif 709#endif
700} 710}
701 711
702static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) 712static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
713 struct mm_struct *mm)
703{ 714{
704#ifdef CONFIG_PGSTE 715#ifdef CONFIG_PGSTE
705 unsigned long address, bits, skey; 716 unsigned long address, bits, skey;
706 717
707 if (pte_val(*ptep) & _PAGE_INVALID) 718 if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
708 return pgste; 719 return pgste;
709 address = pte_val(*ptep) & PAGE_MASK; 720 address = pte_val(*ptep) & PAGE_MASK;
710 skey = (unsigned long) page_get_storage_key(address); 721 skey = (unsigned long) page_get_storage_key(address);
@@ -729,10 +740,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
729 740
730} 741}
731 742
732static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) 743static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste,
744 struct mm_struct *mm)
733{ 745{
734#ifdef CONFIG_PGSTE 746#ifdef CONFIG_PGSTE
735 if (pte_val(*ptep) & _PAGE_INVALID) 747 if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
736 return pgste; 748 return pgste;
737 /* Get referenced bit from storage key */ 749 /* Get referenced bit from storage key */
738 if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK)) 750 if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
@@ -741,13 +753,14 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
741 return pgste; 753 return pgste;
742} 754}
743 755
744static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) 756static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
757 struct mm_struct *mm)
745{ 758{
746#ifdef CONFIG_PGSTE 759#ifdef CONFIG_PGSTE
747 unsigned long address; 760 unsigned long address;
748 unsigned long nkey; 761 unsigned long nkey;
749 762
750 if (pte_val(entry) & _PAGE_INVALID) 763 if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
751 return; 764 return;
752 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); 765 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
753 address = pte_val(entry) & PAGE_MASK; 766 address = pte_val(entry) & PAGE_MASK;
@@ -870,7 +883,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
870 if (mm_has_pgste(mm)) { 883 if (mm_has_pgste(mm)) {
871 pgste = pgste_get_lock(ptep); 884 pgste = pgste_get_lock(ptep);
872 pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; 885 pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
873 pgste_set_key(ptep, pgste, entry); 886 pgste_set_key(ptep, pgste, entry, mm);
874 pgste_set_pte(ptep, entry); 887 pgste_set_pte(ptep, entry);
875 pgste_set_unlock(ptep, pgste); 888 pgste_set_unlock(ptep, pgste);
876 } else { 889 } else {
@@ -1028,7 +1041,7 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
1028 1041
1029 if (mm_has_pgste(mm)) { 1042 if (mm_has_pgste(mm)) {
1030 pgste = pgste_get_lock(ptep); 1043 pgste = pgste_get_lock(ptep);
1031 pgste = pgste_update_all(ptep, pgste); 1044 pgste = pgste_update_all(ptep, pgste, mm);
1032 dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT); 1045 dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
1033 pgste_val(pgste) &= ~PGSTE_HC_BIT; 1046 pgste_val(pgste) &= ~PGSTE_HC_BIT;
1034 pgste_set_unlock(ptep, pgste); 1047 pgste_set_unlock(ptep, pgste);
@@ -1048,7 +1061,7 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
1048 1061
1049 if (mm_has_pgste(mm)) { 1062 if (mm_has_pgste(mm)) {
1050 pgste = pgste_get_lock(ptep); 1063 pgste = pgste_get_lock(ptep);
1051 pgste = pgste_update_young(ptep, pgste); 1064 pgste = pgste_update_young(ptep, pgste, mm);
1052 young = !!(pgste_val(pgste) & PGSTE_HR_BIT); 1065 young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
1053 pgste_val(pgste) &= ~PGSTE_HR_BIT; 1066 pgste_val(pgste) &= ~PGSTE_HR_BIT;
1054 pgste_set_unlock(ptep, pgste); 1067 pgste_set_unlock(ptep, pgste);
@@ -1182,7 +1195,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1182 pte_val(*ptep) = _PAGE_INVALID; 1195 pte_val(*ptep) = _PAGE_INVALID;
1183 1196
1184 if (mm_has_pgste(mm)) { 1197 if (mm_has_pgste(mm)) {
1185 pgste = pgste_update_all(&pte, pgste); 1198 pgste = pgste_update_all(&pte, pgste, mm);
1186 pgste_set_unlock(ptep, pgste); 1199 pgste_set_unlock(ptep, pgste);
1187 } 1200 }
1188 return pte; 1201 return pte;
@@ -1205,7 +1218,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1205 ptep_flush_lazy(mm, address, ptep); 1218 ptep_flush_lazy(mm, address, ptep);
1206 1219
1207 if (mm_has_pgste(mm)) { 1220 if (mm_has_pgste(mm)) {
1208 pgste = pgste_update_all(&pte, pgste); 1221 pgste = pgste_update_all(&pte, pgste, mm);
1209 pgste_set(ptep, pgste); 1222 pgste_set(ptep, pgste);
1210 } 1223 }
1211 return pte; 1224 return pte;
@@ -1219,7 +1232,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1219 1232
1220 if (mm_has_pgste(mm)) { 1233 if (mm_has_pgste(mm)) {
1221 pgste = pgste_get(ptep); 1234 pgste = pgste_get(ptep);
1222 pgste_set_key(ptep, pgste, pte); 1235 pgste_set_key(ptep, pgste, pte, mm);
1223 pgste_set_pte(ptep, pte); 1236 pgste_set_pte(ptep, pte);
1224 pgste_set_unlock(ptep, pgste); 1237 pgste_set_unlock(ptep, pgste);
1225 } else 1238 } else
@@ -1246,7 +1259,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1246 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == 1259 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
1247 _PGSTE_GPS_USAGE_UNUSED) 1260 _PGSTE_GPS_USAGE_UNUSED)
1248 pte_val(pte) |= _PAGE_UNUSED; 1261 pte_val(pte) |= _PAGE_UNUSED;
1249 pgste = pgste_update_all(&pte, pgste); 1262 pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
1250 pgste_set_unlock(ptep, pgste); 1263 pgste_set_unlock(ptep, pgste);
1251 } 1264 }
1252 return pte; 1265 return pte;
@@ -1278,7 +1291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1278 pte_val(*ptep) = _PAGE_INVALID; 1291 pte_val(*ptep) = _PAGE_INVALID;
1279 1292
1280 if (!full && mm_has_pgste(mm)) { 1293 if (!full && mm_has_pgste(mm)) {
1281 pgste = pgste_update_all(&pte, pgste); 1294 pgste = pgste_update_all(&pte, pgste, mm);
1282 pgste_set_unlock(ptep, pgste); 1295 pgste_set_unlock(ptep, pgste);
1283 } 1296 }
1284 return pte; 1297 return pte;