Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--   include/asm-s390/pgtable.h   94
1 file changed, 46 insertions(+), 48 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index e965309fedac..519f0a5ff181 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -31,9 +31,9 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
-#include <linux/threads.h>
 
 struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
 struct mm_struct;
@@ -554,9 +554,10 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 		/* ipte in zarch mode can do the math */
 		pte_t *pto = ptep;
 #endif
-		asm volatile ("ipte %2,%3"
-			      : "=m" (*ptep) : "m" (*ptep),
-			      "a" (pto), "a" (address) );
+		asm volatile(
+			" ipte %2,%3"
+			: "=m" (*ptep) : "m" (*ptep),
+			  "a" (pto), "a" (address));
 	}
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
@@ -596,30 +597,31 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-#define page_test_and_clear_dirty(_page) \
-({ \
-	struct page *__page = (_page); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	int __skey = page_get_storage_key(__physpage); \
-	if (__skey & _PAGE_CHANGED) \
-		page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
-	(__skey & _PAGE_CHANGED); \
-})
+static inline int page_test_and_clear_dirty(struct page *page)
+{
+	unsigned long physpage = page_to_phys(page);
+	int skey = page_get_storage_key(physpage);
+
+	if (skey & _PAGE_CHANGED)
+		page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
+	return skey & _PAGE_CHANGED;
+}
 
 /*
  * Test and clear referenced bit in storage key.
  */
-#define page_test_and_clear_young(page) \
-({ \
-	struct page *__page = (page); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	int __ccode; \
-	asm volatile ("rrbe 0,%1\n\t" \
-		      "ipm %0\n\t" \
-		      "srl %0,28\n\t" \
-		      : "=d" (__ccode) : "a" (__physpage) : "cc" ); \
-	(__ccode & 2); \
-})
+static inline int page_test_and_clear_young(struct page *page)
+{
+	unsigned long physpage = page_to_phys(page);
+	int ccode;
+
+	asm volatile(
+		" rrbe 0,%1\n"
+		" ipm %0\n"
+		" srl %0,28\n"
+		: "=d" (ccode) : "a" (physpage) : "cc" );
+	return ccode & 2;
+}
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -632,32 +634,28 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 	return __pte;
 }
 
-#define mk_pte(pg, pgprot) \
-({ \
-	struct page *__page = (pg); \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
-	__pte; \
-})
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	unsigned long physpage = page_to_phys(page);
 
-#define pfn_pte(pfn, pgprot) \
-({ \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
-	__pte; \
-})
+	return mk_pte_phys(physpage, pgprot);
+}
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return mk_pte_phys(physpage, pgprot);
+}
 
 #ifdef __s390x__
 
-#define pfn_pmd(pfn, pgprot) \
-({ \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
-	pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \
-	__pmd; \
-})
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return __pmd(physpage + pgprot_val(pgprot));
+}
 
 #endif /* __s390x__ */
 
@@ -666,11 +664,11 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 
 #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
 
-#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
 #define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
 
-#define pgd_page(pgd) (mem_map+(pgd_val(pgd) >> PAGE_SHIFT))
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))