Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
 arch/s390/include/asm/mmu_context.h | 22 ++--------------------
 1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9b..9f973d8de90e 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,24 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm && current->mm->context.alloc_pgste) {
-		/*
-		 * alloc_pgste indicates, that any NEW context will be created
-		 * with extended page tables. The old context is unchanged. The
-		 * page table allocation and the page table operations will
-		 * look at has_pgste to distinguish normal and extended page
-		 * tables. The only way to create extended page tables is to
-		 * set alloc_pgste and then create a new context (e.g. dup_mm).
-		 * The page table allocation is called after init_new_context
-		 * and if has_pgste is set, it will create extended page
-		 * tables.
-		 */
-		mm->context.has_pgste = 1;
-		mm->context.alloc_pgste = 1;
-	} else {
-		mm->context.has_pgste = 0;
-		mm->context.alloc_pgste = 0;
-	}
+	mm->context.has_pgste = 0;
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
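
The first hunk removes the fork-time inheritance of extended page tables: alloc_pgste is gone from init_new_context(), and every new mm now starts with has_pgste = 0. A context that needs extended page tables (pgstes) must therefore be converted explicitly after creation. A minimal sketch of what such an enablement path could look like; the function name and the reallocation step are illustrative assumptions, not part of this patch:

static int enable_extended_page_tables(struct mm_struct *mm)
{
	/*
	 * Hypothetical helper, not in this patch: switch an existing mm
	 * over to extended page tables at runtime instead of deciding
	 * at fork time via alloc_pgste.
	 */
	if (mm->context.has_pgste)
		return 0;	/* already using extended page tables */
	/* ... reallocate the page tables with room for pgstes ... */
	mm->context.has_pgste = 1;	/* page table ops now use pgstes */
	return 0;
}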
@@ -77,8 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	atomic_inc(&next->context.attach_count);
 	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
+	__tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
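
The second hunk replaces the open-coded lazy flush in switch_mm() with a helper. A sketch of __tlb_flush_mm_lazy() consistent with the two lines it replaces; clearing context.flush_mm after the flush is an assumption about the helper, not visible in this diff:

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	/* Flush only if a flush request was deferred for this mm. */
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;	/* assumed: reset the deferred-flush flag */
	}
}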