Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/mmu.h           3
-rw-r--r--  arch/s390/include/asm/mmu_context.h  19
-rw-r--r--  arch/s390/include/asm/pgtable.h       8
3 files changed, 22 insertions, 8 deletions
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 5dd5e7b3476f..d2b4ff831477 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -7,7 +7,8 @@ typedef struct {
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	int noexec;
-	int pgstes;
+	int has_pgste;	 /* The mmu context has extended page tables */
+	int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4c2fbf48c9c4..28ec870655af 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,12 +20,25 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm->context.pgstes) {
+	if (current->mm->context.alloc_pgste) {
+		/*
+		 * alloc_pgste indicates, that any NEW context will be created
+		 * with extended page tables. The old context is unchanged. The
+		 * page table allocation and the page table operations will
+		 * look at has_pgste to distinguish normal and extended page
+		 * tables. The only way to create extended page tables is to
+		 * set alloc_pgste and then create a new context (e.g. dup_mm).
+		 * The page table allocation is called after init_new_context
+		 * and if has_pgste is set, it will create extended page
+		 * tables.
+		 */
 		mm->context.noexec = 0;
-		mm->context.pgstes = 1;
+		mm->context.has_pgste = 1;
+		mm->context.alloc_pgste = 1;
 	} else {
 		mm->context.noexec = s390_noexec;
-		mm->context.pgstes = 0;
+		mm->context.has_pgste = 0;
+		mm->context.alloc_pgste = 0;
 	}
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
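
The comment added above describes the hand-off: alloc_pgste is set on the
current context, and only contexts created afterwards (e.g. via dup_mm) come
up with has_pgste set and extended page tables allocated. A minimal,
stand-alone sketch of that branch, using hypothetical struct and function
names that mirror the patched init_new_context():

	#include <string.h>

	/* Hypothetical stand-in for the s390 mm context; fields follow the patch. */
	struct mm_context_sketch {
		int noexec;
		int has_pgste;		/* this context uses extended page tables */
		int alloc_pgste;	/* contexts cloned from it will use them too */
	};

	/* Mirrors the branch above: the parent's alloc_pgste decides whether the
	 * newly created context gets extended page tables; the parent context
	 * itself is left unchanged. */
	static void init_new_context_sketch(const struct mm_context_sketch *parent,
					    struct mm_context_sketch *child)
	{
		memset(child, 0, sizeof(*child));
		if (parent->alloc_pgste) {
			child->noexec = 0;
			child->has_pgste = 1;
			child->alloc_pgste = 1;
		} else {
			child->noexec = 1;	/* stands in for s390_noexec */
			child->has_pgste = 0;
			child->alloc_pgste = 0;
		}
	}
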
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1a928f84afd6..7fc76133b3e4 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	if (mm->context.pgstes)
+	if (mm->context.has_pgste)
 		ptep_rcp_copy(ptep);
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 	if (mm->context.noexec)
@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	struct page *page;
 	unsigned int skey;
 
-	if (!mm->context.pgstes)
+	if (!mm->context.has_pgste)
 		return -EINVAL;
 	rcp_lock(ptep);
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	int young;
 	unsigned long *pgste;
 
-	if (!vma->vm_mm->context.pgstes)
+	if (!vma->vm_mm->context.has_pgste)
 		return 0;
 	physpage = pte_val(*ptep) & PAGE_MASK;
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 static inline void ptep_invalidate(struct mm_struct *mm,
 				   unsigned long address, pte_t *ptep)
 {
-	if (mm->context.pgstes) {
+	if (mm->context.has_pgste) {
 		rcp_lock(ptep);
 		__ptep_ipte(address, ptep);
 		ptep_rcp_copy(ptep);
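
The pgtable.h hunks only rename the pgstes check to has_pgste; the pgste
handling itself is untouched. For orientation, each hunk reaches the pgste
belonging to a pte at ptep + PTRS_PER_PTE, i.e. the pgste array sits directly
behind the pte array in the same page table. A small sketch of that
addressing, assuming the 64-bit s390 value PTRS_PER_PTE == 256:

	#include <stdint.h>

	#define PTRS_PER_PTE 256	/* assumed 64-bit s390 page table size */

	/* Sketch: with extended page tables, the pgste for a given pte entry
	 * is reached by stepping over the whole pte array, exactly as the
	 * hunks above compute it (ptep + PTRS_PER_PTE). */
	static inline uint64_t *pgste_of_sketch(uint64_t *ptep)
	{
		return ptep + PTRS_PER_PTE;
	}
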