author     Paul Mackerras <paulus@samba.org>  2005-10-10 07:58:35 -0400
committer  Paul Mackerras <paulus@samba.org>  2005-10-10 07:58:35 -0400
commit     ab1f9dac6eea25ee59e4c8e1cf0b7476afbbfe07
tree       03577652197b5e58c348ede3c474bc8dd47e046c  /arch/powerpc/mm/stab.c
parent     70d64ceaa1a84d2502405422a4dfd3f87786a347
powerpc: Merge arch/ppc64/mm to arch/powerpc/mm
This moves the remaining files in arch/ppc64/mm to arch/powerpc/mm,
and arranges that we use them when compiling with ARCH=ppc64.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/stab.c')
-rw-r--r--  arch/powerpc/mm/stab.c  279
1 file changed, 279 insertions, 0 deletions
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
new file mode 100644
index 000000000000..1b83f002bf27
--- /dev/null
+++ b/arch/powerpc/mm/stab.c
@@ -0,0 +1,279 @@
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>

struct stab_entry {
        unsigned long esid_data;
        unsigned long vsid_data;
};

/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
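/*
 * Each user-segment allocation is logged here so that switch_stab()
 * can invalidate just those entries; stab_cache_ptr counts the valid
 * slots, and a value above NR_STAB_CACHE_ENTRIES flags an overflow
 * that forces a full-table scan instead.
 */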
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
        unsigned long esid_data, vsid_data;
        unsigned long entry, group, old_esid, castout_entry, i;
        unsigned int global_entry;
        struct stab_entry *ste, *castout_ste;
        unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;

        vsid_data = vsid << STE_VSID_SHIFT;
        esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
        if (!kernel_segment)
                esid_data |= STE_ESID_KS;

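        /*
         * The segment table is hashed: the primary group for an ESID
         * is indexed by its low five bits, the secondary group by
         * their complement.  Each group holds eight 16-byte entries,
         * so a group starts at a 128-byte (<< 7) offset in the page.
         */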
        /* Search the primary group first. */
        global_entry = (esid & 0x1f) << 3;
        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

        /* Find an empty entry, if one exists. */
        for (group = 0; group < 2; group++) {
                for (entry = 0; entry < 8; entry++, ste++) {
                        if (!(ste->esid_data & STE_ESID_V)) {
                                ste->vsid_data = vsid_data;
                                asm volatile("eieio" : : : "memory");
                                ste->esid_data = esid_data;
                                return (global_entry | entry);
                        }
                }
                /* Now search the secondary group. */
                global_entry = ((~esid) & 0x1f) << 3;
                ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
        }

        /*
         * Could not find an empty entry; pick one with round-robin
         * selection, searching all entries in both groups.
         */
        castout_entry = get_paca()->stab_rr;
        for (i = 0; i < 16; i++) {
                if (castout_entry < 8) {
                        global_entry = (esid & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
                        castout_ste = ste + castout_entry;
                } else {
                        global_entry = ((~esid) & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
                        castout_ste = ste + (castout_entry - 8);
                }

                /* Don't cast out the first kernel segment */
                if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
                        break;

                castout_entry = (castout_entry + 1) & 0xf;
        }

        get_paca()->stab_rr = (castout_entry + 1) & 0xf;

        /* Modify the old entry to the new value. */

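        /*
         * The replacement is done in ordered steps: invalidate the
         * old ESID, sync, write the new VSID, then write the new
         * ESID with the valid bit set, so a concurrent lookup never
         * pairs a valid ESID with a stale VSID.
         */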
        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        old_esid = castout_ste->esid_data >> SID_SHIFT;
        castout_ste->esid_data = 0;             /* Invalidate old entry */

        asm volatile("sync" : : : "memory");    /* Order update */

        castout_ste->vsid_data = vsid_data;
        asm volatile("eieio" : : : "memory");   /* Order update */
        castout_ste->esid_data = esid_data;

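        /* slbie drops any cached translation for the castout ESID. */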
        asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
        /* Ensure completion of slbie */
        asm volatile("sync" : : : "memory");

        return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
        unsigned long vsid;
        unsigned char stab_entry;
        unsigned long offset;

        /* Kernel or user address? */
        if (ea >= KERNELBASE) {
                vsid = get_kernel_vsid(ea);
        } else {
                if ((ea >= TASK_SIZE_USER64) || (!mm))
                        return 1;

                vsid = get_vsid(mm->context.id, ea);
        }

        stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

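        /*
         * For user addresses, note the slot in the per-CPU cache so
         * the next context switch can invalidate it cheaply; on
         * overflow, leave the pointer past the limit to request a
         * full-table scan instead.
         */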
        if (ea < KERNELBASE) {
                offset = __get_cpu_var(stab_cache_ptr);
                if (offset < NR_STAB_CACHE_ENTRIES)
                        __get_cpu_var(stab_cache[offset++]) = stab_entry;
                else
                        offset = NR_STAB_CACHE_ENTRIES + 1;
                __get_cpu_var(stab_cache_ptr) = offset;

                /* Order update */
                asm volatile("sync" : : : "memory");
        }

        return 0;
}

int ste_allocate(unsigned long ea)
{
        return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
        struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
        struct stab_entry *ste;
        unsigned long offset = __get_cpu_var(stab_cache_ptr);
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

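        /*
         * If the per-CPU cache did not overflow, only the recorded
         * entries need invalidating; otherwise scan the whole table,
         * clearing user entries and sparing entry 0, which maps the
         * bolted kernel segment.
         */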
        if (offset <= NR_STAB_CACHE_ENTRIES) {
                int i;

                for (i = 0; i < offset; i++) {
                        ste = stab + __get_cpu_var(stab_cache[i]);
                        ste->esid_data = 0; /* invalidate entry */
                }
        } else {
                unsigned long entry;

                /* Invalidate all entries. */
                ste = stab;

                /* Never flush the first entry. */
                ste += 1;
                for (entry = 1;
                     entry < (PAGE_SIZE / sizeof(struct stab_entry));
                     entry++, ste++) {
                        unsigned long ea;
                        ea = ste->esid_data & ESID_MASK;
                        if (ea < KERNELBASE) {
                                ste->esid_data = 0;
                        }
                }
        }

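        /*
         * slbia flushes cached segment translations so none of the
         * entries just invalidated can still be used.
         */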
        asm volatile("sync; slbia; sync" : : : "memory");

        __get_cpu_var(stab_cache_ptr) = 0;

        /* Now preload some entries for the new task */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

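        /*
         * Preload segments for the instruction pointer, the stack
         * and the mmap base, skipping any that fall in a segment
         * already entered.
         */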
        __ste_allocate(pc, mm);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        __ste_allocate(stack, mm);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        __ste_allocate(unmapped_base, mm);

        /* Order update */
        asm volatile("sync" : : : "memory");
}

extern void slb_initialize(void);

/*
 * Allocate segment tables for secondary CPUs. These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void stabs_alloc(void)
{
        int cpu;

        if (cpu_has_feature(CPU_FTR_SLB))
                return;

        for_each_cpu(cpu) {
                unsigned long newstab;

                if (cpu == 0)
                        continue; /* stab for CPU 0 is statically allocated */

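                /*
                 * Bound the allocation below 1 << SID_SHIFT (the
                 * first 256MB segment) so the new table itself lives
                 * in the bolted kernel segment.
                 */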
                newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
                if (!newstab)
                        panic("Unable to allocate segment table for CPU %d.\n",
                              cpu);

                newstab += KERNELBASE;

                memset((void *)newstab, 0, PAGE_SIZE);

                paca[cpu].stab_addr = newstab;
                paca[cpu].stab_real = virt_to_abs(newstab);
                printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n",
                       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
        }
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB. All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
        unsigned long vsid = get_kernel_vsid(KERNELBASE);

        if (cpu_has_feature(CPU_FTR_SLB)) {
                slb_initialize();
        } else {
                asm volatile("isync; slbia; isync" : : : "memory");
                make_ste(stab, GET_ESID(KERNELBASE), vsid);

                /* Order update */
                asm volatile("sync" : : : "memory");
        }
}