Diffstat (limited to 'arch/um/sys-i386/tls.c')
 -rw-r--r--  arch/um/sys-i386/tls.c | 384
 1 file changed, 384 insertions, 0 deletions
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
new file mode 100644
index 000000000000..a3188e861cc7
--- /dev/null
+++ b/arch/um/sys-i386/tls.c
@@ -0,0 +1,384 @@
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"
#include "mode.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* If needed we can detect when it's uninitialized. */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min = -1;

#ifdef CONFIG_MODE_SKAS
int do_set_thread_area_skas(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_set_thread_area(info, userspace_pid[cpu]);
        put_cpu();
        return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_get_thread_area(info, userspace_pid[cpu]);
        put_cpu();
        return ret;
}
#endif
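
/*
 * Implementation note on the helpers above: in SKAS mode the guest's TLS
 * descriptors live in the GDT of the host process that runs this CPU's
 * userspace, so os_set_thread_area()/os_get_thread_area() have to operate on
 * that process - presumably by forwarding the user_desc with something like
 * ptrace(PTRACE_SET_THREAD_AREA, userspace_pid[cpu], idx, &info) and its
 * PTRACE_GET_THREAD_AREA counterpart; see the os-Linux layer for the actual
 * mechanism.
 */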
54 | |||
55 | /* | ||
56 | * sys_get_thread_area: get a yet unused TLS descriptor index. | ||
57 | * XXX: Consider leaving one free slot for glibc usage at first place. This must | ||
58 | * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else. | ||
59 | * | ||
60 | * Also, this must be tested when compiling in SKAS mode with dinamic linking | ||
61 | * and running against NPTL. | ||
62 | */ | ||
static int get_free_idx(struct task_struct *task)
{
        struct thread_struct *t = &task->thread;
        int idx;

        if (!t->arch.tls_array)
                return GDT_ENTRY_TLS_MIN;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
        /* Postcondition: LDT_empty(info) returns true. */
        memset(info, 0, sizeof(*info));

        /*
         * Check LDT_empty() or the i386 sys_get_thread_area() code: with these
         * two bits set we indeed obtain an empty user_desc.
         */
        info->read_exec_only = 1;
        info->seg_not_present = 1;
}
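
/*
 * For reference, a sketch of what LDT_empty() checks (as defined by the i386
 * headers of this era; the real macro is the authority): every field of the
 * user_desc must be zero, *except* read_exec_only and seg_not_present, which
 * must both be 1 - exactly the state clear_user_desc() produces:
 *
 *	struct user_desc empty = {
 *		.read_exec_only  = 1,
 *		.seg_not_present = 1,
 *		// all other fields zero
 *	};
 */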
88 | |||
89 | #define O_FORCE 1 | ||
90 | |||
91 | static int load_TLS(int flags, struct task_struct *to) | ||
92 | { | ||
93 | int ret = 0; | ||
94 | int idx; | ||
95 | |||
96 | for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { | ||
97 | struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; | ||
98 | |||
99 | /* Actually, now if it wasn't flushed it gets cleared and | ||
100 | * flushed to the host, which will clear it.*/ | ||
101 | if (!curr->present) { | ||
102 | if (!curr->flushed) { | ||
103 | clear_user_desc(&curr->tls); | ||
104 | curr->tls.entry_number = idx; | ||
105 | } else { | ||
106 | WARN_ON(!LDT_empty(&curr->tls)); | ||
107 | continue; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | if (!(flags & O_FORCE) && curr->flushed) | ||
112 | continue; | ||
113 | |||
114 | ret = do_set_thread_area(&curr->tls); | ||
115 | if (ret) | ||
116 | goto out; | ||
117 | |||
118 | curr->flushed = 1; | ||
119 | } | ||
120 | out: | ||
121 | return ret; | ||
122 | } | ||
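
/*
 * In short: load_TLS(0, task) only pushes entries whose ->flushed bit is
 * still clear, while load_TLS(O_FORCE, task) also rewrites entries the host
 * already has, which is what switching to a different host process needs -
 * compare arch_switch_tls_skas() and arch_switch_tls_tt() below:
 *
 *	load_TLS(O_FORCE, to);	// SKAS switch: resync every entry
 *	load_TLS(0, to);	// TT switch: only push pending updates
 */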
123 | |||
124 | /* Verify if we need to do a flush for the new process, i.e. if there are any | ||
125 | * present desc's, only if they haven't been flushed. | ||
126 | */ | ||
static inline int needs_TLS_update(struct task_struct *task)
{
        int i;
        int ret = 0;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct *curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * Can't test curr->present, we may need to clear a descriptor
                 * which had a value.
                 */
                if (curr->flushed)
                        continue;
                ret = 1;
                break;
        }
        return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such, and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
        int i;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct *curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * Still correct to do this; if it wasn't present on the host,
                 * it will remain as flushed as it was.
                 */
                if (!curr->present)
                        continue;

                curr->flushed = 0;
        }
}
163 | |||
164 | /* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a | ||
165 | * common host process. So this is needed in SKAS0 too. | ||
166 | * | ||
167 | * However, if each thread had a different host process (and this was discussed | ||
168 | * for SMP support) this won't be needed. | ||
169 | * | ||
170 | * And this will not need be used when (and if) we'll add support to the host | ||
171 | * SKAS patch. */ | ||
172 | |||
173 | int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to) | ||
174 | { | ||
175 | if (!host_supports_tls) | ||
176 | return 0; | ||
177 | |||
        /*
         * We have no need whatsoever to switch TLS for kernel threads; beyond
         * that, doing so would also result in us calling os_set_thread_area
         * with userspace_pid[cpu] == 0, which gives an error.
         */
        if (likely(to->mm))
                return load_TLS(O_FORCE, to);

        return 0;
}

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
        if (!host_supports_tls)
                return 0;

        if (needs_TLS_update(to))
                return load_TLS(0, to);

        return 0;
}

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
                         int idx, int flushed)
{
        struct thread_struct *t = &task->thread;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

        return 0;
}

int arch_copy_tls(struct task_struct *new)
{
        struct user_desc info;
        int idx, ret = -EFAULT;

        if (copy_from_user(&info,
                           (void __user *) UPT_ESI(&new->thread.regs.regs),
                           sizeof(info)))
                goto out;

        ret = -EINVAL;
        if (LDT_empty(&info))
                goto out;

        idx = info.entry_number;

        ret = set_tls_entry(new, &info, idx, 0);
out:
        return ret;
}
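
/*
 * Why UPT_ESI above: on i386, clone() passes the CLONE_SETTLS argument (a
 * struct user_desc __user *) in %esi, so arch_copy_tls() - presumably invoked
 * from the fork path when CLONE_SETTLS is set - reads the new thread's
 * descriptor from the child's saved %esi, mirroring what copy_thread() does
 * on native i386.
 */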
233 | |||
234 | /* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */ | ||
235 | static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx) | ||
236 | { | ||
237 | struct thread_struct *t = &task->thread; | ||
238 | |||
239 | if (!t->arch.tls_array) | ||
240 | goto clear; | ||
241 | |||
242 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
243 | return -EINVAL; | ||
244 | |||
245 | if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present) | ||
246 | goto clear; | ||
247 | |||
248 | *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; | ||
249 | |||
out:
        /*
         * Temporary debugging check, to make sure that things have been
         * flushed. This could be triggered if load_TLS() failed.
         */
        if (unlikely(task == current &&
                     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
                       "without flushed TLS.\n", current->pid);
        }

        return 0;
clear:
        /*
         * When the TLS entry has not been set, the values returned to the user
         * in the tls_array are 0 (because it's cleared at boot; see
         * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
         */
        clear_user_desc(info);
        info->entry_number = idx;
        goto out;
}
269 | |||
270 | asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc) | ||
271 | { | ||
272 | struct user_desc info; | ||
273 | int idx, ret; | ||
274 | |||
275 | if (!host_supports_tls) | ||
276 | return -ENOSYS; | ||
277 | |||
278 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
279 | return -EFAULT; | ||
280 | |||
281 | idx = info.entry_number; | ||
282 | |||
283 | if (idx == -1) { | ||
284 | idx = get_free_idx(current); | ||
285 | if (idx < 0) | ||
286 | return idx; | ||
287 | info.entry_number = idx; | ||
288 | /* Tell the user which slot we chose for him.*/ | ||
289 | if (put_user(idx, &user_desc->entry_number)) | ||
290 | return -EFAULT; | ||
291 | } | ||
292 | |||
293 | ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info); | ||
294 | if (ret) | ||
295 | return ret; | ||
296 | return set_tls_entry(current, &info, idx, 1); | ||
297 | } | ||
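
/*
 * Illustrative userspace usage (a sketch, not taken from this file; tls_block
 * stands for the caller's TLS memory): a threading library typically lets the
 * kernel pick the slot by passing entry_number == -1, then reads back the
 * slot the kernel chose:
 *
 *	struct user_desc u = {
 *		.entry_number   = -1,		// ask for a free slot
 *		.base_addr      = (unsigned long) tls_block,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *	if (syscall(__NR_set_thread_area, &u) == 0)
 *		// u.entry_number now holds the allocated GDT slot
 */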
298 | |||
299 | /* | ||
300 | * Perform set_thread_area on behalf of the traced child. | ||
301 | * Note: error handling is not done on the deferred load, and this differ from | ||
302 | * i386. However the only possible error are caused by bugs. | ||
303 | */ | ||
int ptrace_set_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;

        if (!host_supports_tls)
                return -EIO;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        return set_tls_entry(child, &info, idx, 0);
}

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (get_user(idx, &user_desc->entry_number))
                return -EFAULT;

        ret = get_tls_entry(current, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;

out:
        return ret;
}
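
/*
 * Illustrative counterpart to the example above (again only a sketch): for
 * get_thread_area the caller chooses the slot up front by filling in
 * entry_number before the call, e.g. GDT_ENTRY_TLS_MIN (6 on i386):
 *
 *	struct user_desc u = { .entry_number = 6 };
 *	if (syscall(__NR_get_thread_area, &u) == 0)
 *		// u now describes that TLS slot (or an empty descriptor)
 */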
339 | |||
340 | /* | ||
341 | * Perform get_thread_area on behalf of the traced child. | ||
342 | */ | ||
343 | int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
344 | struct user_desc __user *user_desc) | ||
345 | { | ||
346 | struct user_desc info; | ||
347 | int ret; | ||
348 | |||
349 | if (!host_supports_tls) | ||
350 | return -EIO; | ||
351 | |||
352 | ret = get_tls_entry(child, &info, idx); | ||
353 | if (ret < 0) | ||
354 | goto out; | ||
355 | |||
356 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
357 | ret = -EFAULT; | ||
358 | out: | ||
359 | return ret; | ||
360 | } | ||
361 | |||
362 | |||
363 | /* XXX: This part is probably common to i386 and x86-64. Don't create a common | ||
364 | * file for now, do that when implementing x86-64 support.*/ | ||
static int __init __setup_host_supports_tls(void)
{
        check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
        if (host_supports_tls) {
                printk(KERN_INFO "Host TLS support detected\n");
                printk(KERN_INFO "Detected host type: ");
                switch (host_gdt_entry_tls_min) {
                case GDT_ENTRY_TLS_MIN_I386:
                        printk("i386\n");
                        break;
                case GDT_ENTRY_TLS_MIN_X86_64:
                        printk("x86_64\n");
                        break;
                }
        } else
                printk(KERN_ERR "Host TLS support NOT detected! "
                       "TLS support inside UML will not work\n");
        return 1;
}

__initcall(__setup_host_supports_tls);