 include/asm-s390/mmu_context.h | 50 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index bcf24a873874..1d21da220d49 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -9,6 +9,7 @@
 #ifndef __S390_MMU_CONTEXT_H
 #define __S390_MMU_CONTEXT_H
 
+#include <asm/pgalloc.h>
 /*
  * get a new mmu context.. S390 don't know about contexts.
  */
@@ -16,29 +17,44 @@
 
 #define destroy_context(mm)             do { } while (0)
 
+#ifndef __s390x__
+#define LCTL_OPCODE "lctl"
+#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
+#else
+#define LCTL_OPCODE "lctlg"
+#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
+#endif
+
 static inline void enter_lazy_tlb(struct mm_struct *mm,
                                   struct task_struct *tsk)
 {
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
+        pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
+
         if (prev != next) {
-#ifndef __s390x__
-                S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) |
-                        (_SEGMENT_TABLE|USER_STD_MASK);
-                /* Load home space page table origin. */
-                asm volatile("lctl 13,13,%0"
-                             : : "m" (S390_lowcore.user_asce) );
-#else /* __s390x__ */
-                S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-                        (_REGION_TABLE|USER_STD_MASK);
-                /* Load home space page table origin. */
-                asm volatile("lctlg 13,13,%0"
-                             : : "m" (S390_lowcore.user_asce) );
-#endif /* __s390x__ */
+                S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
+                        PGTABLE_BITS;
+                if (shadow_pgd) {
+                        /* Load primary/secondary space page table origin. */
+                        S390_lowcore.user_exec_asce =
+                                (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
+                        asm volatile(LCTL_OPCODE" 1,1,%0\n"
+                                     LCTL_OPCODE" 7,7,%1"
+                                     : : "m" (S390_lowcore.user_exec_asce),
+                                         "m" (S390_lowcore.user_asce) );
+                } else if (switch_amode) {
+                        /* Load primary space page table origin. */
+                        asm volatile(LCTL_OPCODE" 1,1,%0"
+                                     : : "m" (S390_lowcore.user_asce) );
+                } else
+                        /* Load home space page table origin. */
+                        asm volatile(LCTL_OPCODE" 13,13,%0"
+                                     : : "m" (S390_lowcore.user_asce) );
         }
         cpu_set(smp_processor_id(), next->cpu_vm_mask);
 }
 
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev,
         set_fs(current->thread.mm_segment);
 }
 
-#endif
+#endif /* __S390_MMU_CONTEXT_H */
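
The change above folds the 31-bit and 64-bit paths into a single body by moving the only differences into the LCTL_OPCODE and PGTABLE_BITS macros, and then decides which control registers receive the user ASCE: cr1 and cr7 when the mm has a shadow pgd (separate execute ASCE), cr1 alone when switch_amode is set, and cr13 (home space) otherwise. The stand-alone sketch below mirrors only that three-way dispatch in plain C to make the register assignment easy to follow; the names in it (asce_t, struct lowcore_model, load_user_asces, the example ASCE values) are simplified stand-ins invented for illustration, not kernel interfaces, and the real code loads the registers with lctl/lctlg inline assembly.

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long asce_t;

/* Hypothetical model of the two lowcore fields touched by the patch. */
struct lowcore_model {
        asce_t user_asce;       /* ASCE built from next->pgd             */
        asce_t user_exec_asce;  /* ASCE built from the shadow (exec) pgd */
};

/* Stands in for the switch_amode setting referenced by the patch. */
static bool switch_amode = true;

/* Mirrors the three-way dispatch in the new switch_mm() body. */
static void load_user_asces(const struct lowcore_model *lc, bool has_shadow_pgd)
{
        if (has_shadow_pgd) {
                /* Shadow pgd: cr1 gets the exec ASCE, cr7 the data ASCE. */
                printf("cr1 <- %#lx, cr7 <- %#lx\n",
                       lc->user_exec_asce, lc->user_asce);
        } else if (switch_amode) {
                /* Primary address-space mode: cr1 gets the user ASCE. */
                printf("cr1 <- %#lx\n", lc->user_asce);
        } else {
                /* Traditional home-space mode: cr13 gets the user ASCE. */
                printf("cr13 <- %#lx\n", lc->user_asce);
        }
}

int main(void)
{
        struct lowcore_model lc = {
                .user_asce = 0x12340000ul,
                .user_exec_asce = 0x56780000ul,
        };

        load_user_asces(&lc, true);   /* mm has a shadow pgd             */
        load_user_asces(&lc, false);  /* no shadow pgd, switch_amode set */
        return 0;
}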