author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-07-24 05:03:41 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-08-01 08:55:05 -0400
commit     152125b7a882df36a55a8eadbea6d0edf1461ee7 (patch)
tree       247d93a58d6ff9ff9ccbe66840acaa240069d879 /arch/s390
parent     55e4283c3eb1d850893f645dd695c9c75d5fa1fc (diff)
s390/mm: implement dirty bits for large segment table entries
The large segment table entry format has a block of bits for the ACC/F values of the large page. These bits are valid only if another bit of the segment table entry, the AV bit (0x10000), is set; with the AV bit off the ACC/F bits have no meaning. This makes it possible to put the THP splitting bit, the segment young bit and the new segment dirty bit into the ACC/F bits, as long as the AV bit stays off. The dirty and young information is available only if the pmd is large.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
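As an aside, a minimal user-space sketch of the invariant this relies on: every software bit introduced by the patch sits below the AV bit and outside the large-page origin, so setting them never makes the ACC/F field architecturally valid. The constant values are taken from the pgtable.h hunk below; the name _SEGMENT_ENTRY_AV is invented here for the sketch, the commit only refers to it as the AV bit (0x10000). This is not kernel code, just a self-contained check of the bit layout.

#include <assert.h>
#include <stdio.h>

#define _SEGMENT_ENTRY_AV           0x10000UL    /* "AV bit" from the commit message; name made up here */
#define _SEGMENT_ENTRY_DIRTY        0x2000UL     /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG        0x1000UL     /* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT        0x0800UL     /* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE        0x0400UL     /* STE-format control, large page */
#define _SEGMENT_ENTRY_PROTECT      0x0200UL     /* page protection bit */
#define _SEGMENT_ENTRY_CO           0x0100UL     /* change-recording override */
#define _SEGMENT_ENTRY_INVALID      0x0020UL     /* invalid segment table entry */
#define _SEGMENT_ENTRY_READ         0x0002UL     /* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE        0x0001UL     /* SW segment write bit */
#define _SEGMENT_ENTRY_ORIGIN_LARGE (~0xfffffUL) /* large page address */

int main(void)
{
	unsigned long sw = _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			   _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_READ |
			   _SEGMENT_ENTRY_WRITE;

	/* The software bits never touch the AV bit or the large page origin ... */
	assert((sw & (_SEGMENT_ENTRY_AV | _SEGMENT_ENTRY_ORIGIN_LARGE)) == 0);
	/* ... and are disjoint from the hardware bits the code keeps using. */
	assert((sw & (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_PROTECT |
		      _SEGMENT_ENTRY_CO | _SEGMENT_ENTRY_INVALID)) == 0);

	printf("software bits 0x%lx stay below the AV bit 0x%lx\n",
	       sw, _SEGMENT_ENTRY_AV);
	return 0;
}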
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/pgtable.h   197
-rw-r--r--  arch/s390/mm/hugetlbpage.c        103
-rw-r--r--  arch/s390/mm/pgtable.c              3
3 files changed, 159 insertions(+), 144 deletions(-)
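Also purely as an illustration of the new helpers in the pgtable.h hunk below (pmd_mkclean, pmd_mkdirty, and the pmdp_set_access_flags change in pgtable.c), here is a condensed user-space rendition of the software dirty tracking for large entries: a clean entry keeps the hardware protection bit set even when it is logically writable, so the first store faults, and the fault path marks the entry dirty and drops the protection bit. The SEG_* names and the flat unsigned long are simplifications for the sketch, and the large-pmd check the kernel helpers perform is omitted.

#include <stdio.h>

#define SEG_DIRTY   0x2000UL	/* SW segment dirty bit */
#define SEG_YOUNG   0x1000UL	/* SW segment young bit */
#define SEG_LARGE   0x0400UL	/* STE-format control, large page */
#define SEG_PROTECT 0x0200UL	/* hardware page protection bit */
#define SEG_WRITE   0x0001UL	/* SW segment write bit */

/* Clean means write-protected in hardware, regardless of SEG_WRITE. */
static unsigned long seg_mkclean(unsigned long e)
{
	e &= ~SEG_DIRTY;
	e |= SEG_PROTECT;
	return e;
}

/* Dirty and logically writable means hardware writes are allowed again. */
static unsigned long seg_mkdirty(unsigned long e)
{
	e |= SEG_DIRTY;
	if (e & SEG_WRITE)
		e &= ~SEG_PROTECT;
	return e;
}

int main(void)
{
	/* A large, logically read-write, but still clean entry. */
	unsigned long e = seg_mkclean(SEG_LARGE | SEG_WRITE | SEG_YOUNG);

	printf("clean: PROTECT=%d DIRTY=%d\n",
	       !!(e & SEG_PROTECT), !!(e & SEG_DIRTY));

	/* The first store faults on SEG_PROTECT; the handler dirties the entry. */
	e = seg_mkdirty(e);
	printf("dirty: PROTECT=%d DIRTY=%d\n",
	       !!(e & SEG_PROTECT), !!(e & SEG_DIRTY));
	return 0;
}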
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fcba5e03839f..b76317c1f3eb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -287,7 +287,14 @@ extern unsigned long MODULES_END;
287#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ 287#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
288#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ 288#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
289#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ 289#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
290#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT 290
291#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
292#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
293#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
294#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
295#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
296#define _SEGMENT_ENTRY_BITS_LARGE 0
297#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
291 298
292#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) 299#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
293#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 300#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
@@ -350,7 +357,7 @@ extern unsigned long MODULES_END;
350 357
351/* Bits in the segment table entry */ 358/* Bits in the segment table entry */
352#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 359#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
353#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL 360#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
354#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ 361#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
355#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 362#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
356#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ 363#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
@@ -359,30 +366,34 @@ extern unsigned long MODULES_END;
359#define _SEGMENT_ENTRY (0) 366#define _SEGMENT_ENTRY (0)
360#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 367#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
361 368
362#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ 369#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
363#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ 370#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
364#define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */ 371#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
365#define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */ 372#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
366#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG 373#define _SEGMENT_ENTRY_CO 0x0100 /* change-recording override */
374#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
375#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
367 376
368/* 377/*
369 * Segment table entry encoding (R = read-only, I = invalid, y = young bit): 378 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
370 * ..R...I...y. 379 * dy..R...I...wr
371 * prot-none, old ..0...1...1. 380 * prot-none, clean, old 00..1...1...00
372 * prot-none, young ..1...1...1. 381 * prot-none, clean, young 01..1...1...00
373 * read-only, old ..1...1...0. 382 * prot-none, dirty, old 10..1...1...00
374 * read-only, young ..1...0...1. 383 * prot-none, dirty, young 11..1...1...00
375 * read-write, old ..0...1...0. 384 * read-only, clean, old 00..1...1...01
376 * read-write, young ..0...0...1. 385 * read-only, clean, young 01..1...0...01
386 * read-only, dirty, old 10..1...1...01
387 * read-only, dirty, young 11..1...0...01
388 * read-write, clean, old 00..1...1...11
389 * read-write, clean, young 01..1...0...11
390 * read-write, dirty, old 10..0...1...11
391 * read-write, dirty, young 11..0...0...11
377 * The segment table origin is used to distinguish empty (origin==0) from 392 * The segment table origin is used to distinguish empty (origin==0) from
378 * read-write, old segment table entries (origin!=0) 393 * read-write, old segment table entries (origin!=0)
379 */ 394 */
380 395
381#define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ 396#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
382
383/* Set of bits not changed in pmd_modify */
384#define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
385 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
386 397
387/* Page status table bits for virtualization */ 398/* Page status table bits for virtualization */
388#define PGSTE_ACC_BITS 0xf000000000000000UL 399#define PGSTE_ACC_BITS 0xf000000000000000UL
@@ -455,10 +466,11 @@ extern unsigned long MODULES_END;
455 * Segment entry (large page) protection definitions. 466 * Segment entry (large page) protection definitions.
456 */ 467 */
457#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ 468#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
458 _SEGMENT_ENTRY_NONE)
459#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \
460 _SEGMENT_ENTRY_PROTECT) 469 _SEGMENT_ENTRY_PROTECT)
461#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID) 470#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
471 _SEGMENT_ENTRY_READ)
472#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
473 _SEGMENT_ENTRY_WRITE)
462 474
463static inline int mm_has_pgste(struct mm_struct *mm) 475static inline int mm_has_pgste(struct mm_struct *mm)
464{ 476{
@@ -569,25 +581,23 @@ static inline int pmd_none(pmd_t pmd)
569 581
570static inline int pmd_large(pmd_t pmd) 582static inline int pmd_large(pmd_t pmd)
571{ 583{
572#ifdef CONFIG_64BIT
573 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 584 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
574#else
575 return 0;
576#endif
577} 585}
578 586
579static inline int pmd_prot_none(pmd_t pmd) 587static inline int pmd_pfn(pmd_t pmd)
580{ 588{
581 return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) && 589 unsigned long origin_mask;
582 (pmd_val(pmd) & _SEGMENT_ENTRY_NONE); 590
591 origin_mask = _SEGMENT_ENTRY_ORIGIN;
592 if (pmd_large(pmd))
593 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
594 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
583} 595}
584 596
585static inline int pmd_bad(pmd_t pmd) 597static inline int pmd_bad(pmd_t pmd)
586{ 598{
587#ifdef CONFIG_64BIT
588 if (pmd_large(pmd)) 599 if (pmd_large(pmd))
589 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; 600 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
590#endif
591 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; 601 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
592} 602}
593 603
@@ -607,20 +617,22 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
607#define __HAVE_ARCH_PMD_WRITE 617#define __HAVE_ARCH_PMD_WRITE
608static inline int pmd_write(pmd_t pmd) 618static inline int pmd_write(pmd_t pmd)
609{ 619{
610 if (pmd_prot_none(pmd)) 620 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
611 return 0; 621}
612 return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0; 622
623static inline int pmd_dirty(pmd_t pmd)
624{
625 int dirty = 1;
626 if (pmd_large(pmd))
627 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
628 return dirty;
613} 629}
614 630
615static inline int pmd_young(pmd_t pmd) 631static inline int pmd_young(pmd_t pmd)
616{ 632{
617 int young = 0; 633 int young = 1;
618#ifdef CONFIG_64BIT 634 if (pmd_large(pmd))
619 if (pmd_prot_none(pmd))
620 young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
621 else
622 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; 635 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
623#endif
624 return young; 636 return young;
625} 637}
626 638
@@ -1391,7 +1403,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1391#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1403#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1392#define pte_page(x) pfn_to_page(pte_pfn(x)) 1404#define pte_page(x) pfn_to_page(pte_pfn(x))
1393 1405
1394#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) 1406#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1395 1407
1396/* Find an entry in the lowest level page table.. */ 1408/* Find an entry in the lowest level page table.. */
1397#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1409#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -1413,41 +1425,75 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1413 return pgprot_val(SEGMENT_WRITE); 1425 return pgprot_val(SEGMENT_WRITE);
1414} 1426}
1415 1427
1416static inline pmd_t pmd_mkyoung(pmd_t pmd) 1428static inline pmd_t pmd_wrprotect(pmd_t pmd)
1417{ 1429{
1418#ifdef CONFIG_64BIT 1430 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1419 if (pmd_prot_none(pmd)) { 1431 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1432 return pmd;
1433}
1434
1435static inline pmd_t pmd_mkwrite(pmd_t pmd)
1436{
1437 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1438 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1439 return pmd;
1440 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1441 return pmd;
1442}
1443
1444static inline pmd_t pmd_mkclean(pmd_t pmd)
1445{
1446 if (pmd_large(pmd)) {
1447 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1420 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1448 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1421 } else { 1449 }
1450 return pmd;
1451}
1452
1453static inline pmd_t pmd_mkdirty(pmd_t pmd)
1454{
1455 if (pmd_large(pmd)) {
1456 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
1457 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1458 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1459 }
1460 return pmd;
1461}
1462
1463static inline pmd_t pmd_mkyoung(pmd_t pmd)
1464{
1465 if (pmd_large(pmd)) {
1422 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1466 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1423 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; 1467 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1468 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1424 } 1469 }
1425#endif
1426 return pmd; 1470 return pmd;
1427} 1471}
1428 1472
1429static inline pmd_t pmd_mkold(pmd_t pmd) 1473static inline pmd_t pmd_mkold(pmd_t pmd)
1430{ 1474{
1431#ifdef CONFIG_64BIT 1475 if (pmd_large(pmd)) {
1432 if (pmd_prot_none(pmd)) {
1433 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1434 } else {
1435 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; 1476 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1436 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1477 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1437 } 1478 }
1438#endif
1439 return pmd; 1479 return pmd;
1440} 1480}
1441 1481
1442static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1482static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1443{ 1483{
1444 int young; 1484 if (pmd_large(pmd)) {
1445 1485 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1446 young = pmd_young(pmd); 1486 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1447 pmd_val(pmd) &= _SEGMENT_CHG_MASK; 1487 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
1488 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1489 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1490 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1491 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1492 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1493 return pmd;
1494 }
1495 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
1448 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1496 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1449 if (young)
1450 pmd = pmd_mkyoung(pmd);
1451 return pmd; 1497 return pmd;
1452} 1498}
1453 1499
@@ -1455,16 +1501,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1455{ 1501{
1456 pmd_t __pmd; 1502 pmd_t __pmd;
1457 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); 1503 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1458 return pmd_mkyoung(__pmd); 1504 return __pmd;
1459} 1505}
1460 1506
1461static inline pmd_t pmd_mkwrite(pmd_t pmd)
1462{
1463 /* Do not clobber PROT_NONE segments! */
1464 if (!pmd_prot_none(pmd))
1465 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1466 return pmd;
1467}
1468#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1507#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1469 1508
1470static inline void __pmdp_csp(pmd_t *pmdp) 1509static inline void __pmdp_csp(pmd_t *pmdp)
@@ -1555,34 +1594,21 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1555 1594
1556static inline int pmd_trans_splitting(pmd_t pmd) 1595static inline int pmd_trans_splitting(pmd_t pmd)
1557{ 1596{
1558 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT; 1597 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
1598 (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
1559} 1599}
1560 1600
1561static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1601static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1562 pmd_t *pmdp, pmd_t entry) 1602 pmd_t *pmdp, pmd_t entry)
1563{ 1603{
1564 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
1565 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1566 *pmdp = entry; 1604 *pmdp = entry;
1567} 1605}
1568 1606
1569static inline pmd_t pmd_mkhuge(pmd_t pmd) 1607static inline pmd_t pmd_mkhuge(pmd_t pmd)
1570{ 1608{
1571 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1609 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1572 return pmd; 1610 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1573} 1611 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1574
1575static inline pmd_t pmd_wrprotect(pmd_t pmd)
1576{
1577 /* Do not clobber PROT_NONE segments! */
1578 if (!pmd_prot_none(pmd))
1579 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1580 return pmd;
1581}
1582
1583static inline pmd_t pmd_mkdirty(pmd_t pmd)
1584{
1585 /* No dirty bit in the segment table entry. */
1586 return pmd; 1612 return pmd;
1587} 1613}
1588 1614
@@ -1647,11 +1673,6 @@ static inline int has_transparent_hugepage(void)
1647{ 1673{
1648 return MACHINE_HAS_HPAGE ? 1 : 0; 1674 return MACHINE_HAS_HPAGE ? 1 : 0;
1649} 1675}
1650
1651static inline unsigned long pmd_pfn(pmd_t pmd)
1652{
1653 return pmd_val(pmd) >> PAGE_SHIFT;
1654}
1655#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1676#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1656 1677
1657/* 1678/*
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 0ff66a7e29bb..389bc17934b7 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -10,42 +10,33 @@
10 10
11static inline pmd_t __pte_to_pmd(pte_t pte) 11static inline pmd_t __pte_to_pmd(pte_t pte)
12{ 12{
13 int none, young, prot;
14 pmd_t pmd; 13 pmd_t pmd;
15 14
16 /* 15 /*
17 * Convert encoding pte bits pmd bits 16 * Convert encoding pte bits pmd bits
18 * .IR...wrdytp ..R...I...y. 17 * .IR...wrdytp dy..R...I...wr
19 * empty .10...000000 -> ..0...1...0. 18 * empty .10...000000 -> 00..0...1...00
20 * prot-none, clean, old .11...000001 -> ..0...1...1. 19 * prot-none, clean, old .11...000001 -> 00..1...1...00
21 * prot-none, clean, young .11...000101 -> ..1...1...1. 20 * prot-none, clean, young .11...000101 -> 01..1...1...00
22 * prot-none, dirty, old .10...001001 -> ..0...1...1. 21 * prot-none, dirty, old .10...001001 -> 10..1...1...00
23 * prot-none, dirty, young .10...001101 -> ..1...1...1. 22 * prot-none, dirty, young .10...001101 -> 11..1...1...00
24 * read-only, clean, old .11...010001 -> ..1...1...0. 23 * read-only, clean, old .11...010001 -> 00..1...1...01
25 * read-only, clean, young .01...010101 -> ..1...0...1. 24 * read-only, clean, young .01...010101 -> 01..1...0...01
26 * read-only, dirty, old .11...011001 -> ..1...1...0. 25 * read-only, dirty, old .11...011001 -> 10..1...1...01
27 * read-only, dirty, young .01...011101 -> ..1...0...1. 26 * read-only, dirty, young .01...011101 -> 11..1...0...01
28 * read-write, clean, old .11...110001 -> ..0...1...0. 27 * read-write, clean, old .11...110001 -> 00..0...1...11
29 * read-write, clean, young .01...110101 -> ..0...0...1. 28 * read-write, clean, young .01...110101 -> 01..0...0...11
30 * read-write, dirty, old .10...111001 -> ..0...1...0. 29 * read-write, dirty, old .10...111001 -> 10..0...1...11
31 * read-write, dirty, young .00...111101 -> ..0...0...1. 30 * read-write, dirty, young .00...111101 -> 11..0...0...11
32 * Huge ptes are dirty by definition, a clean pte is made dirty
33 * by the conversion.
34 */ 31 */
35 if (pte_present(pte)) { 32 if (pte_present(pte)) {
36 pmd_val(pmd) = pte_val(pte) & PAGE_MASK; 33 pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
37 if (pte_val(pte) & _PAGE_INVALID) 34 pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
38 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 35 pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
39 none = (pte_val(pte) & _PAGE_PRESENT) && 36 pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
40 !(pte_val(pte) & _PAGE_READ) && 37 pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
41 !(pte_val(pte) & _PAGE_WRITE); 38 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
42 prot = (pte_val(pte) & _PAGE_PROTECT) && 39 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
43 !(pte_val(pte) & _PAGE_WRITE);
44 young = pte_val(pte) & _PAGE_YOUNG;
45 if (none || young)
46 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
47 if (prot || (none && young))
48 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
49 } else 40 } else
50 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; 41 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
51 return pmd; 42 return pmd;
@@ -56,34 +47,31 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
56 pte_t pte; 47 pte_t pte;
57 48
58 /* 49 /*
59 * Convert encoding pmd bits pte bits 50 * Convert encoding pmd bits pte bits
60 * ..R...I...y. .IR...wrdytp 51 * dy..R...I...wr .IR...wrdytp
61 * empty ..0...1...0. -> .10...000000 52 * empty 00..0...1...00 -> .10...001100
62 * prot-none, old ..0...1...1. -> .10...001001 53 * prot-none, clean, old 00..0...1...00 -> .10...000001
63 * prot-none, young ..1...1...1. -> .10...001101 54 * prot-none, clean, young 01..0...1...00 -> .10...000101
64 * read-only, old ..1...1...0. -> .11...011001 55 * prot-none, dirty, old 10..0...1...00 -> .10...001001
65 * read-only, young ..1...0...1. -> .01...011101 56 * prot-none, dirty, young 11..0...1...00 -> .10...001101
66 * read-write, old ..0...1...0. -> .10...111001 57 * read-only, clean, old 00..1...1...01 -> .11...010001
67 * read-write, young ..0...0...1. -> .00...111101 58 * read-only, clean, young 01..1...1...01 -> .11...010101
68 * Huge ptes are dirty by definition 59 * read-only, dirty, old 10..1...1...01 -> .11...011001
60 * read-only, dirty, young 11..1...1...01 -> .11...011101
61 * read-write, clean, old 00..0...1...11 -> .10...110001
62 * read-write, clean, young 01..0...1...11 -> .10...110101
63 * read-write, dirty, old 10..0...1...11 -> .10...111001
64 * read-write, dirty, young 11..0...1...11 -> .10...111101
69 */ 65 */
70 if (pmd_present(pmd)) { 66 if (pmd_present(pmd)) {
71 pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY | 67 pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
72 (pmd_val(pmd) & PAGE_MASK); 68 pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
73 if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) 69 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
74 pte_val(pte) |= _PAGE_INVALID; 70 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
75 if (pmd_prot_none(pmd)) { 71 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
76 if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) 72 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
77 pte_val(pte) |= _PAGE_YOUNG; 73 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
78 } else { 74 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
79 pte_val(pte) |= _PAGE_READ;
80 if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
81 pte_val(pte) |= _PAGE_PROTECT;
82 else
83 pte_val(pte) |= _PAGE_WRITE;
84 if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
85 pte_val(pte) |= _PAGE_YOUNG;
86 }
87 } else 75 } else
88 pte_val(pte) = _PAGE_INVALID; 76 pte_val(pte) = _PAGE_INVALID;
89 return pte; 77 return pte;
@@ -96,6 +84,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
96 84
97 pmd = __pte_to_pmd(pte); 85 pmd = __pte_to_pmd(pte);
98 if (!MACHINE_HAS_HPAGE) { 86 if (!MACHINE_HAS_HPAGE) {
87 /* Emulated huge ptes loose the dirty and young bit */
99 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; 88 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
100 pmd_val(pmd) |= pte_page(pte)[1].index; 89 pmd_val(pmd) |= pte_page(pte)[1].index;
101 } else 90 } else
@@ -113,6 +102,8 @@ pte_t huge_ptep_get(pte_t *ptep)
113 origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN; 102 origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
114 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; 103 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
115 pmd_val(pmd) |= *(unsigned long *) origin; 104 pmd_val(pmd) |= *(unsigned long *) origin;
105 /* Emulated huge ptes are young and dirty by definition */
106 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
116 } 107 }
117 return __pmd_to_pte(pmd); 108 return __pmd_to_pte(pmd);
118} 109}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f90ad8592b36..19daa53a3da4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1433,6 +1433,9 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
1433{ 1433{
1434 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1434 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1435 1435
1436 entry = pmd_mkyoung(entry);
1437 if (dirty)
1438 entry = pmd_mkdirty(entry);
1436 if (pmd_same(*pmdp, entry)) 1439 if (pmd_same(*pmdp, entry))
1437 return 0; 1440 return 0;
1438 pmdp_invalidate(vma, address, pmdp); 1441 pmdp_invalidate(vma, address, pmdp);