Diffstat (limited to 'arch/m68k/include/asm/mmu_context.h')
-rw-r--r--  arch/m68k/include/asm/mmu_context.h  |  176
1 file changed, 173 insertions(+), 3 deletions(-)
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index b440928fc6c..7d4341e55a9 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -1,5 +1,175 @@
-#ifdef __uClinux__
-#include "mmu_context_no.h"
+#ifndef __M68K_MMU_CONTEXT_H
+#define __M68K_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#ifdef CONFIG_MMU
+#ifndef CONFIG_SUN3
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+static inline int init_new_context(struct task_struct *tsk,
+                                   struct mm_struct *mm)
+{
+        mm->context = virt_to_phys(mm->pgd);
+        return 0;
+}
+
+#define destroy_context(mm)             do { } while(0)
+
+static inline void switch_mm_0230(struct mm_struct *mm)
+{
+        unsigned long crp[2] = {
+                0x80000000 | _PAGE_TABLE, mm->context
+        };
+        unsigned long tmp;
+
+        asm volatile (".chip 68030");
+
+        /* flush MC68030/MC68020 caches (they are virtually addressed) */
+        asm volatile (
+                "movec %%cacr,%0;"
+                "orw %1,%0; "
+                "movec %0,%%cacr"
+                : "=d" (tmp) : "di" (FLUSH_I_AND_D));
+
+        /* Switch the root pointer. For a 030-only kernel,
+         * avoid flushing the whole ATC, we only need to
+         * flush the user entries. The 68851 does this by
+         * itself. Avoid a runtime check here.
+         */
+        asm volatile (
+#ifdef CPU_M68030_ONLY
+                "pmovefd %0,%%crp; "
+                "pflush #0,#4"
 #else
-#include "mmu_context_mm.h"
+                "pmove %0,%%crp"
 #endif
+                : : "m" (crp[0]));
+
+        asm volatile (".chip 68k");
+}
+
+static inline void switch_mm_0460(struct mm_struct *mm)
+{
+        asm volatile (".chip 68040");
+
+        /* flush address translation cache (user entries) */
+        asm volatile ("pflushan");
+
+        /* switch the root pointer */
+        asm volatile ("movec %0,%%urp" : : "r" (mm->context));
+
+        if (CPU_IS_060) {
+                unsigned long tmp;
+
+                /* clear user entries in the branch cache */
+                asm volatile (
+                        "movec %%cacr,%0; "
+                        "orl %1,%0; "
+                        "movec %0,%%cacr"
+                        : "=d" (tmp): "di" (0x00200000));
+        }
+
+        asm volatile (".chip 68k");
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+        if (prev != next) {
+                if (CPU_IS_020_OR_030)
+                        switch_mm_0230(next);
+                else
+                        switch_mm_0460(next);
+        }
+}
+
+#define deactivate_mm(tsk,mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+                               struct mm_struct *next_mm)
+{
+        next_mm->context = virt_to_phys(next_mm->pgd);
+
+        if (CPU_IS_020_OR_030)
+                switch_mm_0230(next_mm);
+        else
+                switch_mm_0460(next_mm);
+}
+
+#else /* CONFIG_SUN3 */
+#include <asm/sun3mmu.h>
+#include <linux/sched.h>
+
+extern unsigned long get_free_context(struct mm_struct *mm);
+extern void clear_context(unsigned long context);
+
+/* set the context for a new task to unmapped */
+static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        mm->context = SUN3_INVALID_CONTEXT;
+        return 0;
+}
+
+/* find the context given to this process, and if it hasn't already
+   got one, go get one for it. */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+        if(mm->context == SUN3_INVALID_CONTEXT)
+                mm->context = get_free_context(mm);
+}
+
+/* flush context if allocated... */
+static inline void destroy_context(struct mm_struct *mm)
+{
+        if(mm->context != SUN3_INVALID_CONTEXT)
+                clear_context(mm->context);
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+        get_mmu_context(mm);
+        sun3_put_context(mm->context);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+        activate_context(tsk->mm);
+}
+
+#define deactivate_mm(tsk,mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+                               struct mm_struct *next_mm)
+{
+        activate_context(next_mm);
+}
+
+#endif
+#else /* !CONFIG_MMU */
+
+static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        return 0;
+}
+
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+}
+
+#define destroy_context(mm)     do { } while (0)
+#define deactivate_mm(tsk,mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
+{
+}
+
+#endif /* CONFIG_MMU */
+#endif /* __M68K_MMU_CONTEXT_H */