Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--  arch/s390/mm/pgtable.c | 185
1 file changed, 82 insertions(+), 103 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..71c7eff2c89f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
                         break;
                 /* Walk the process page table, lock and get pte pointer */
                 ptep = get_locked_pte(gmap->mm, addr, &ptl);
-                if (unlikely(!ptep))
-                        continue;
+                VM_BUG_ON(!ptep);
                 /* Set notification bit in the pgste of the pte */
                 entry = *ptep;
                 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
                         gaddr += PAGE_SIZE;
                         len -= PAGE_SIZE;
                 }
-                spin_unlock(ptl);
+                pte_unmap_unlock(ptep, ptl);
         }
         up_read(&gmap->mm->mmap_sem);
         return rc;
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
         __free_page(page);
 }
 
-static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
-                        unsigned long addr, unsigned long end, bool init_skey)
-{
-        pte_t *start_pte, *pte;
-        spinlock_t *ptl;
-        pgste_t pgste;
-
-        start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-        pte = start_pte;
-        do {
-                pgste = pgste_get_lock(pte);
-                pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
-                if (init_skey) {
-                        unsigned long address;
-
-                        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
-                                              PGSTE_GR_BIT | PGSTE_GC_BIT);
-
-                        /* skip invalid and not writable pages */
-                        if (pte_val(*pte) & _PAGE_INVALID ||
-                            !(pte_val(*pte) & _PAGE_WRITE)) {
-                                pgste_set_unlock(pte, pgste);
-                                continue;
-                        }
-
-                        address = pte_val(*pte) & PAGE_MASK;
-                        page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
-                }
-                pgste_set_unlock(pte, pgste);
-        } while (pte++, addr += PAGE_SIZE, addr != end);
-        pte_unmap_unlock(start_pte, ptl);
-
-        return addr;
-}
-
-static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
-                        unsigned long addr, unsigned long end, bool init_skey)
-{
-        unsigned long next;
-        pmd_t *pmd;
-
-        pmd = pmd_offset(pud, addr);
-        do {
-                next = pmd_addr_end(addr, end);
-                if (pmd_none_or_clear_bad(pmd))
-                        continue;
-                next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
-        } while (pmd++, addr = next, addr != end);
-
-        return addr;
-}
-
-static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
-                        unsigned long addr, unsigned long end, bool init_skey)
-{
-        unsigned long next;
-        pud_t *pud;
-
-        pud = pud_offset(pgd, addr);
-        do {
-                next = pud_addr_end(addr, end);
-                if (pud_none_or_clear_bad(pud))
-                        continue;
-                next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
-        } while (pud++, addr = next, addr != end);
-
-        return addr;
-}
-
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-                            unsigned long end, bool init_skey)
-{
-        unsigned long addr, next;
-        pgd_t *pgd;
-
-        down_write(&mm->mmap_sem);
-        if (init_skey && mm_use_skey(mm))
-                goto out_up;
-        addr = start;
-        pgd = pgd_offset(mm, addr);
-        do {
-                next = pgd_addr_end(addr, end);
-                if (pgd_none_or_clear_bad(pgd))
-                        continue;
-                next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
-        } while (pgd++, addr = next, addr != end);
-        if (init_skey)
-                current->mm->context.use_skey = 1;
-out_up:
-        up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL(page_table_reset_pgste);
-
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                           unsigned long key, bool nq)
 {
@@ -992,11 +900,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
         return NULL;
 }
 
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
-                            unsigned long end, bool init_skey)
-{
-}
-
 static inline void page_table_free_pgste(unsigned long *table)
 {
 }
@@ -1347,13 +1250,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
  * Enable storage key handling from now on and initialize the storage
  * keys with the default key.
  */
-void s390_enable_skey(void)
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+                              unsigned long next, struct mm_walk *walk)
 {
-        page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+        unsigned long ptev;
+        pgste_t pgste;
+
+        pgste = pgste_get_lock(pte);
+        /*
+         * Remove all zero page mappings,
+         * after establishing a policy to forbid zero page mappings
+         * following faults for that page will get fresh anonymous pages
+         */
+        if (is_zero_pfn(pte_pfn(*pte))) {
+                ptep_flush_direct(walk->mm, addr, pte);
+                pte_val(*pte) = _PAGE_INVALID;
+        }
+        /* Clear storage key */
+        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+                              PGSTE_GR_BIT | PGSTE_GC_BIT);
+        ptev = pte_val(*pte);
+        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+        pgste_set_unlock(pte, pgste);
+        return 0;
+}
+
+int s390_enable_skey(void)
+{
+        struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma;
+        int rc = 0;
+
+        down_write(&mm->mmap_sem);
+        if (mm_use_skey(mm))
+                goto out_up;
+
+        mm->context.use_skey = 1;
+        for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+                                MADV_UNMERGEABLE, &vma->vm_flags)) {
+                        mm->context.use_skey = 0;
+                        rc = -ENOMEM;
+                        goto out_up;
+                }
+        }
+        mm->def_flags &= ~VM_MERGEABLE;
+
+        walk.mm = mm;
+        walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+        up_write(&mm->mmap_sem);
+        return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
 
 /*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+                             unsigned long next, struct mm_walk *walk)
+{
+        pgste_t pgste;
+
+        pgste = pgste_get_lock(pte);
+        pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+        pgste_set_unlock(pte, pgste);
+        return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+        struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+        down_write(&mm->mmap_sem);
+        walk.mm = mm;
+        walk_page_range(0, TASK_SIZE, &walk);
+        up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
  * Test and reset if a guest page is dirty
  */
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
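
Note on the page-walk API this patch switches to (commentary, not part of the diff): both __s390_enable_skey() and __s390_reset_cmma() plug into the kernel's generic page-table walker, where walk_page_range() invokes the .pte_entry hook of a struct mm_walk once for every pte in the given range. The following is a minimal sketch of that pattern, assuming the 3.x-era walk_page_range(start, end, &walk) signature used in this diff; count_pte_entry() and count_present_ptes() are hypothetical names chosen for illustration only.

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical callback: count ptes that map a present page. */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* a nonzero return would abort the walk */
}

/* Walk the whole user address space of mm, as the functions above do. */
static unsigned long count_present_ptes(struct mm_struct *mm)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte_entry,
                .mm = mm,
                .private = &count,
        };

        /* The walker expects mmap_sem to be held, as s390_enable_skey() holds it. */
        down_write(&mm->mmap_sem);
        walk_page_range(0, TASK_SIZE, &walk);
        up_write(&mm->mmap_sem);
        return count;
}

The .private pointer is the walker's standard way to pass per-walk state into the callback; the two callbacks in this patch need none, which is why they initialize only .pte_entry and .mm.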