author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-10-22 06:52:47 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-10-22 06:52:49 -0400
commit     3610cce87af0693603db171d5b6f6735f5e3dc5b (patch)
tree       9aa7d9a0924b2f075c1b95ed57bb63ed512165c9 /include/asm-s390/mmu_context.h
parent     e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (diff)
[S390] Cleanup page table definitions.

- De-confuse the defines for the address-space-control-elements
  and the segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'include/asm-s390/mmu_context.h')
 -rw-r--r--  include/asm-s390/mmu_context.h | 50
 1 file changed, 24 insertions(+), 26 deletions(-)
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b06314..05b842126b99 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
 
 #ifndef __s390x__
 #define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
 #else
 #define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
 #endif
 
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                  struct task_struct *tsk)
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
+	pgd_t *pgd = mm->pgd;
+	unsigned long asce_bits;
+
+	/* Calculate asce bits from the first pgd table entry. */
+	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+	asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+	S390_lowcore.user_asce = asce_bits | __pa(pgd);
+	if (switch_amode) {
+		/* Load primary space page table origin. */
+		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+		asm volatile(LCTL_OPCODE" 1,1,%0\n"
+			     : : "m" (S390_lowcore.user_exec_asce) );
+	} else
+		/* Load home space page table origin. */
+		asm volatile(LCTL_OPCODE" 13,13,%0"
+			     : : "m" (S390_lowcore.user_asce) );
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-	if (prev != next) {
-		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-					 PGTABLE_BITS;
-		if (shadow_pgd) {
-			/* Load primary/secondary space page table origin. */
-			S390_lowcore.user_exec_asce =
-				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-			asm volatile(LCTL_OPCODE" 1,1,%0\n"
-				     LCTL_OPCODE" 7,7,%1"
-				     : : "m" (S390_lowcore.user_exec_asce),
-					 "m" (S390_lowcore.user_asce) );
-		} else if (switch_amode) {
-			/* Load primary space page table origin. */
-			asm volatile(LCTL_OPCODE" 1,1,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-		} else
-			/* Load home space page table origin. */
-			asm volatile(LCTL_OPCODE" 13,13,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-	}
+	if (unlikely(prev == next))
+		return;
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	update_mm(next, tsk);
 }
 
+#define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev,
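
Note (not part of the patch): the new update_mm() drops the old PGTABLE_BITS defines and composes the user ASCE (address space control element) directly, OR-ing the table-length/user/type bits into the physical address of the top-level page table. Below is a minimal userspace sketch of that composition. The ASCE_* constants here are illustrative placeholders, not the real _ASCE_* values from asm-s390/pgtable.h, and make_user_asce() is a hypothetical helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values for illustration only; the real _ASCE_* defines
   live in asm-s390/pgtable.h and are part of this patch series. */
#define ASCE_TABLE_LENGTH   0x03UL   /* length code of the top-level table */
#define ASCE_USER_BITS      0x40UL   /* user-space control bits (assumed)  */
#define ASCE_TYPE_REGION3   0x04UL   /* 64-bit: region-third table type    */

/* Mirrors the shape of the computation in update_mm():
   asce_bits | __pa(pgd), with the type bits only set on 64-bit. */
static uint64_t make_user_asce(uint64_t pgd_phys, int is_64bit)
{
	uint64_t asce_bits = ASCE_TABLE_LENGTH | ASCE_USER_BITS;

	if (is_64bit)
		asce_bits |= ASCE_TYPE_REGION3;
	return asce_bits | pgd_phys;	/* pgd_phys must be page aligned */
}

int main(void)
{
	/* Example: a page-aligned top-level table at physical 0x12345000. */
	printf("user_asce = 0x%016llx\n",
	       (unsigned long long)make_user_asce(0x12345000ULL, 1));
	return 0;
}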