| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-03-08 05:08:09 -0500 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-03-08 09:00:12 -0500 |
| commit | ebde765c0e85f48534f98779b22349bf00761b61 (patch) | |
| tree | af05d33a2b90519d8296fe248bb8bf70b3355259 /arch/s390/mm | |
| parent | 988b86e69ded17f0f1209fd3ef1c4c7f1567dcc1 (diff) | |
s390/mm: uninline ptep_xxx functions from pgtable.h
The code in the various ptep_xxx functions has grown quite large;
consolidate it into four out-of-line functions (usage sketch below):
ptep_xchg_direct to exchange a pte with another with immediate flushing
ptep_xchg_lazy to exchange a pte with another in a batched update
ptep_modify_prot_start to begin a protection flags update
ptep_modify_prot_commit to commit a protection flags update
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
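
For orientation, here is a minimal sketch of how an inline helper in pgtable.h could sit on top of the new start/commit pair. The helper name and the exact flag handling are illustrative assumptions, not code from this commit:

```c
/*
 * Illustrative sketch only: a write-protect style helper built on the
 * new out-of-line primitives. Name and flag choices are assumptions.
 */
static inline void sketch_ptep_set_wrprotect(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep)
{
	/* invalidate the pte, flush lazily, and fetch the old value */
	pte_t pte = ptep_modify_prot_start(mm, addr, ptep);

	/* drop write permission in the software copy of the pte */
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;

	/* re-validate the pte and update the attached pgste */
	ptep_modify_prot_commit(mm, addr, ptep, pte);
}
```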
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/pgtable.c | 295
1 file changed, 266 insertions(+), 29 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 6acd7174fe75..30033aad17da 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -772,7 +772,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 EXPORT_SYMBOL_GPL(gmap_ipte_notify);
 
 /**
- * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
+ * ptep_ipte_notify - call all invalidation callbacks for a specific pte.
  * @mm: pointer to the process mm_struct
  * @addr: virtual address in the process address space
  * @pte: pointer to the page table entry
@@ -780,7 +780,7 @@ EXPORT_SYMBOL_GPL(gmap_ipte_notify);
  * This function is assumed to be called with the page table lock held
  * for the pte to notify.
  */
-void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
+void ptep_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
 {
 	unsigned long offset, gaddr;
 	unsigned long *table;
@@ -801,7 +801,7 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
 	}
 	spin_unlock(&gmap_notifier_lock);
 }
-EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
+EXPORT_SYMBOL_GPL(ptep_ipte_notify);
 
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq)
@@ -1158,6 +1158,266 @@ static inline void thp_split_mm(struct mm_struct *mm)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline pte_t ptep_flush_direct(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
+{
+	int active, count;
+	pte_t old;
+
+	old = *ptep;
+	if (unlikely(pte_val(old) & _PAGE_INVALID))
+		return old;
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+		__ptep_ipte_local(addr, ptep);
+	else
+		__ptep_ipte(addr, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
+}
+
+static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
+				    unsigned long addr, pte_t *ptep)
+{
+	int active, count;
+	pte_t old;
+
+	old = *ptep;
+	if (unlikely(pte_val(old) & _PAGE_INVALID))
+		return old;
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pte_val(*ptep) |= _PAGE_INVALID;
+		mm->context.flush_mm = 1;
+	} else
+		__ptep_ipte(addr, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
+	return old;
+}
+
+static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
+				       struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	unsigned long address, bits, skey;
+
+	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
+		return pgste;
+	address = pte_val(pte) & PAGE_MASK;
+	skey = (unsigned long) page_get_storage_key(address);
+	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+	/* Transfer page changed & referenced bit to guest bits in pgste */
+	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
+	/* Copy page access key and fetch protection bit to pgste */
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+#endif
+	return pgste;
+
+}
+
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	unsigned long address;
+	unsigned long nkey;
+
+	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
+		return;
+	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
+	address = pte_val(entry) & PAGE_MASK;
+	/*
+	 * Set page access key and fetch protection bit from pgste.
+	 * The guest C/R information is still in the PGSTE, set real
+	 * key C/R to 0.
+	 */
+	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
+	page_set_storage_key(address, nkey, 0);
+#endif
+}
+
+static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
+{
+#ifdef CONFIG_PGSTE
+	if ((pte_val(entry) & _PAGE_PRESENT) &&
+	    (pte_val(entry) & _PAGE_WRITE) &&
+	    !(pte_val(entry) & _PAGE_INVALID)) {
+		if (!MACHINE_HAS_ESOP) {
+			/*
+			 * Without enhanced suppression-on-protection force
+			 * the dirty bit on for all writable ptes.
+			 */
+			pte_val(entry) |= _PAGE_DIRTY;
+			pte_val(entry) &= ~_PAGE_PROTECT;
+		}
+		if (!(pte_val(entry) & _PAGE_PROTECT))
+			/* This pte allows write access, set user-dirty */
+			pgste_val(pgste) |= PGSTE_UC_BIT;
+	}
+#endif
+	*ptep = entry;
+	return pgste;
+}
+
+static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
+					unsigned long addr,
+					pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	if (pgste_val(pgste) & PGSTE_IN_BIT) {
+		pgste_val(pgste) &= ~PGSTE_IN_BIT;
+		ptep_ipte_notify(mm, addr, ptep);
+	}
+#endif
+	return pgste;
+}
+
+#ifdef CONFIG_PGSTE
+/*
+ * Test and reset if a guest page is dirty
+ */
+bool pgste_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr)
+{
+	spinlock_t *ptl;
+	pgste_t pgste;
+	pte_t *ptep;
+	pte_t pte;
+	bool dirty;
+
+	ptep = get_locked_pte(mm, addr, &ptl);
+	if (unlikely(!ptep))
+		return false;
+
+	pgste = pgste_get_lock(ptep);
+	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+	pgste_val(pgste) &= ~PGSTE_UC_BIT;
+	pte = *ptep;
+	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+		__ptep_ipte(addr, ptep);
+		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+			pte_val(pte) |= _PAGE_PROTECT;
+		else
+			pte_val(pte) |= _PAGE_INVALID;
+		*ptep = pte;
+	}
+	pgste_set_unlock(ptep, pgste);
+
+	spin_unlock(ptl);
+	return dirty;
+}
+EXPORT_SYMBOL_GPL(pgste_test_and_clear_dirty);
+
+void set_pte_pgste_at(struct mm_struct *mm, unsigned long addr,
+		      pte_t *ptep, pte_t entry)
+{
+	pgste_t pgste;
+
+	/* the mm_has_pgste() check is done in set_pte_at() */
+	pgste = pgste_get_lock(ptep);
+	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
+	pgste_set_key(ptep, pgste, entry, mm);
+	pgste = pgste_set_pte(ptep, pgste, entry);
+	pgste_set_unlock(ptep, pgste);
+}
+EXPORT_SYMBOL(set_pte_pgste_at);
+#endif
+
+static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
+{
+	pgste_t pgste = __pgste(0);
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get_lock(ptep);
+		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+	}
+	return pgste;
+}
+
+static inline void ptep_xchg_commit(struct mm_struct *mm,
+				    unsigned long addr, pte_t *ptep,
+				    pgste_t pgste, pte_t old, pte_t new)
+{
+	if (mm_has_pgste(mm)) {
+		if (pte_val(old) & _PAGE_INVALID)
+			pgste_set_key(ptep, pgste, new, mm);
+		if (pte_val(new) & _PAGE_INVALID) {
+			pgste = pgste_update_all(old, pgste, mm);
+			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+			    _PGSTE_GPS_USAGE_UNUSED)
+				pte_val(old) |= _PAGE_UNUSED;
+		}
+		pgste = pgste_set_pte(ptep, pgste, new);
+		pgste_set_unlock(ptep, pgste);
+	} else {
+		*ptep = new;
+	}
+}
+
+pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
+		       pte_t *ptep, pte_t new)
+{
+	pgste_t pgste;
+	pte_t old;
+
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_direct(mm, addr, ptep);
+	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	return old;
+}
+EXPORT_SYMBOL(ptep_xchg_direct);
+
+pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t new)
+{
+	pgste_t pgste;
+	pte_t old;
+
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_lazy(mm, addr, ptep);
+	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	return old;
+}
+EXPORT_SYMBOL(ptep_xchg_lazy);
+
+pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t old;
+
+	pgste = ptep_xchg_start(mm, addr, ptep);
+	old = ptep_flush_lazy(mm, addr, ptep);
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_update_all(old, pgste, mm);
+		pgste_set(ptep, pgste);
+	}
+	return old;
+}
+EXPORT_SYMBOL(ptep_modify_prot_start);
+
+void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep, pte_t pte)
+{
+	pgste_t pgste;
+
+	if (mm_has_pgste(mm)) {
+		pgste = pgste_get(ptep);
+		pgste_set_key(ptep, pgste, pte, mm);
+		pgste = pgste_set_pte(ptep, pgste, pte);
+		pgste_set_unlock(ptep, pgste);
+	} else {
+		*ptep = pte;
+	}
+}
+EXPORT_SYMBOL(ptep_modify_prot_commit);
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -1190,17 +1450,15 @@ static int __s390_enable_skey(pte_t *pte, unsigned long addr,
 	unsigned long ptev;
 	pgste_t pgste;
 
-	pgste = pgste_get_lock(pte);
 	/*
 	 * Remove all zero page mappings,
 	 * after establishing a policy to forbid zero page mappings
 	 * following faults for that page will get fresh anonymous pages
 	 */
-	if (is_zero_pfn(pte_pfn(*pte))) {
-		ptep_flush_direct(walk->mm, addr, pte);
-		pte_val(*pte) = _PAGE_INVALID;
-	}
+	if (is_zero_pfn(pte_pfn(*pte)))
+		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
 	/* Clear storage key */
+	pgste = pgste_get_lock(pte);
 	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
 			      PGSTE_GR_BIT | PGSTE_GC_BIT);
 	ptev = pte_val(*pte);
@@ -1266,27 +1524,6 @@ void s390_reset_cmma(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
 
-/*
- * Test and reset if a guest page is dirty
- */
-bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
-{
-	pte_t *pte;
-	spinlock_t *ptl;
-	bool dirty = false;
-
-	pte = get_locked_pte(gmap->mm, address, &ptl);
-	if (unlikely(!pte))
-		return false;
-
-	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
-		dirty = true;
-
-	spin_unlock(ptl);
-	return dirty;
-}
-EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmdp)
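
With gmap_test_and_clear_dirty() removed, guest dirty tracking goes through the new mm-level helper. A hedged sketch of what a replacement caller could look like; the translation step and error handling are assumptions about follow-up code, not part of this diff:

```c
/*
 * Assumed replacement for the removed gmap_test_and_clear_dirty():
 * translate the guest address to a process address, then test and
 * clear the user-dirty state via the new mm-level helper. Callers
 * are presumed to hold mmap_sem for the translation.
 */
static bool sketch_gmap_test_and_clear_dirty(struct gmap *gmap,
					     unsigned long gaddr)
{
	unsigned long vmaddr = __gmap_translate(gmap, gaddr);

	if (IS_ERR_VALUE(vmaddr))
		return false;
	return pgste_test_and_clear_dirty(gmap->mm, vmaddr);
}
```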