aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorDominik Dingel <dingel@linux.vnet.ibm.com>2014-10-23 06:08:38 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2014-10-27 08:27:25 -0400
commit2faee8ff9dc6f4bfe46f6d2d110add858140fb20 (patch)
treebcdbe2fd7e77b4e3d9e42ef5f06725a3d8ef7fbd /arch/s390/mm
parent593befa6ab74a805e4f503c8c737c3cffa8066b6 (diff)
s390/mm: prevent and break zero page mappings in case of storage keys
As soon as storage keys are enabled we need to stop working on zero page mappings to prevent inconsistencies between storage keys and pgste. Otherwise following data corruption could happen: 1) guest enables storage key 2) guest sets storage key for not mapped page X -> change goes to PGSTE 3) guest reads from page X -> as X was not dirty before, the page will be zero page backed, storage key from PGSTE for X will go to storage key for zero page 4) guest sets storage key for not mapped page Y (same logic as above) 5) guest reads from page Y -> as Y was not dirty before, the page will be zero page backed, storage key from PGSTE for Y will go to storage key for zero page overwriting storage key for X While holding the mmap sem, we are safe against changes on entries we already fixed, as every fault would need to take the mmap_sem (read). Other vCPUs executing storage key instructions will get a one time interception and be serialized also with mmap_sem. Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/pgtable.c13
1 files changed, 12 insertions, 1 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019afdf50b1a..0f1e9ff6bc12 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1256,6 +1256,15 @@ static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1256 pgste_t pgste; 1256 pgste_t pgste;
1257 1257
1258 pgste = pgste_get_lock(pte); 1258 pgste = pgste_get_lock(pte);
1259 /*
1260 * Remove all zero page mappings,
1261 * after establishing a policy to forbid zero page mappings
1262 * following faults for that page will get fresh anonymous pages
1263 */
1264 if (is_zero_pfn(pte_pfn(*pte))) {
1265 ptep_flush_direct(walk->mm, addr, pte);
1266 pte_val(*pte) = _PAGE_INVALID;
1267 }
1259 /* Clear storage key */ 1268 /* Clear storage key */
1260 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | 1269 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1261 PGSTE_GR_BIT | PGSTE_GC_BIT); 1270 PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -1274,9 +1283,11 @@ void s390_enable_skey(void)
1274 down_write(&mm->mmap_sem); 1283 down_write(&mm->mmap_sem);
1275 if (mm_use_skey(mm)) 1284 if (mm_use_skey(mm))
1276 goto out_up; 1285 goto out_up;
1286
1287 mm->context.use_skey = 1;
1288
1277 walk.mm = mm; 1289 walk.mm = mm;
1278 walk_page_range(0, TASK_SIZE, &walk); 1290 walk_page_range(0, TASK_SIZE, &walk);
1279 mm->context.use_skey = 1;
1280 1291
1281out_up: 1292out_up:
1282 up_write(&mm->mmap_sem); 1293 up_write(&mm->mmap_sem);