author    Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>  2006-03-31 05:30:22 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>                   2006-03-31 15:18:52 -0500
commit    aa6758d4867cd07bd76105ade6177fe6148e559a
tree      0053f855c885a6dc322337468a2c45b6b2f8ddd0 /arch/um/sys-i386
parent    972410b0232e97609fcefc8e408fe3037fcd607b
[PATCH] uml: implement {get,set}_thread_area for i386
Implement sys_[gs]et_thread_area and the corresponding ptrace operations for
UML.  This is the main chunk; additional parts follow.  This implementation is
now well tested and has run reliably for some time, and we've understood all
the previously existing problems.

The implementation saves the new GDT content and then forwards the call to the
host when appropriate, i.e. immediately when the target process is running, or
at context-switch time otherwise (i.e. on fork and on ptrace() calls).

In SKAS mode we must reload the TLS registers on each context switch, because
SKAS does not switch tls_array together with current->mm.

Also, get_cpu() locking has been added; this is needed in SKAS mode only,
since TT mode does not use smp_processor_id().

Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Acked-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
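The "forward to the host" step relies on os_set_thread_area()/os_get_thread_area()
helpers that land in a follow-up patch of this series; only their callers appear
in this diff.  As a rough sketch of what such host-side forwarding can look like,
assuming it drives the host's ptrace() TLS requests (illustrative only, not the
actual follow-up code):

/* Illustrative sketch only: forward a saved user_desc to/from the host
 * process backing the UML address space, via the host ptrace() TLS
 * requests.  The request takes the GDT entry number as "addr" and the
 * struct user_desc pointer as "data". */
#include <errno.h>
#include <sys/ptrace.h>
#include <asm/ldt.h>                    /* struct user_desc */

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif
#ifndef PTRACE_SET_THREAD_AREA
#define PTRACE_SET_THREAD_AREA 26
#endif

int os_set_thread_area(struct user_desc *info, int pid)
{
        if (ptrace(PTRACE_SET_THREAD_AREA, pid,
                   (void *) (long) info->entry_number, (void *) info) < 0)
                return -errno;
        return 0;
}

int os_get_thread_area(struct user_desc *info, int pid)
{
        if (ptrace(PTRACE_GET_THREAD_AREA, pid,
                   (void *) (long) info->entry_number, (void *) info) < 0)
                return -errno;
        return 0;
}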
Diffstat (limited to 'arch/um/sys-i386')
-rw-r--r--  arch/um/sys-i386/Makefile          |   3 +-
-rw-r--r--  arch/um/sys-i386/ptrace.c          |   2 ++
-rw-r--r--  arch/um/sys-i386/sys_call_table.S  |   2 --
-rw-r--r--  arch/um/sys-i386/syscalls.c        |  14 ++++--
-rw-r--r--  arch/um/sys-i386/tls.c             | 326 +++++++++++++++++++++++++
5 files changed, 340 insertions(+), 7 deletions(-)
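The tls.c code below keeps its state in thread.arch.tls_array, whose element
type, struct uml_tls_struct, is introduced by a companion patch in this series
and is not part of this diff.  For reading the code, its assumed shape is
roughly:

/* Assumed definition (added by a companion patch, shown here only for
 * reference): one saved GDT entry plus bookkeeping flags.  "present"
 * means the guest has set this entry; "flushed" means the host GDT
 * already matches the saved copy. */
struct uml_tls_struct {
        struct user_desc tls;
        unsigned flushed:1;
        unsigned present:1;
};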
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 2c06a07a1bdb..98b20b7bba4f 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -1,5 +1,6 @@
 obj-y = bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
-        ptrace_user.o signal.o sigcontext.o syscalls.o sysrq.o sys_call_table.o
+        ptrace_user.o signal.o sigcontext.o syscalls.o sysrq.o \
+        sys_call_table.o tls.o
 
 obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
 
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 927fcd955651..6a23cc6947c3 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -18,10 +18,12 @@
 void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
 {
         update_debugregs(to->thread.arch.debugregs_seq);
+        arch_switch_tls_tt(from, to);
 }
 
 void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
 {
+        arch_switch_tls_skas(from, to);
 }
 
 int is_syscall(unsigned long addr)
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
index ad75c27afe38..1ff61474b25c 100644
--- a/arch/um/sys-i386/sys_call_table.S
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -6,8 +6,6 @@
 
 #define sys_vm86old sys_ni_syscall
 #define sys_vm86 sys_ni_syscall
-#define sys_set_thread_area sys_ni_syscall
-#define sys_get_thread_area sys_ni_syscall
 
 #define sys_stime um_stime
 #define sys_time um_time
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 1845123ed124..749dd1bfe60f 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -61,21 +61,27 @@ long old_select(struct sel_arg_struct __user *arg)
         return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
 }
 
-/* The i386 version skips reading from %esi, the fourth argument. So we must do
- * this, too.
+/*
+ * The prototype on i386 is:
+ *
+ *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *
+ * and the "newtls" arg. on i386 is read by copy_thread directly from the
+ * register saved on the stack.
  */
 long sys_clone(unsigned long clone_flags, unsigned long newsp,
-               int __user *parent_tid, int unused, int __user *child_tid)
+               int __user *parent_tid, void *newtls, int __user *child_tid)
 {
         long ret;
 
         if (!newsp)
                 newsp = UPT_SP(&current->thread.regs.regs);
+
         current->thread.forking = 1;
         ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
                       child_tid);
         current->thread.forking = 0;
-        return(ret);
+        return ret;
 }
 
 /*
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
new file mode 100644
index 000000000000..e3c5bc593fae
--- /dev/null
+++ b/arch/um/sys-i386/tls.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+ * Licensed under the GPL
+ */
+
+#include "linux/config.h"
+#include "linux/kernel.h"
+#include "linux/sched.h"
+#include "linux/slab.h"
+#include "linux/types.h"
+#include "asm/uaccess.h"
+#include "asm/ptrace.h"
+#include "asm/segment.h"
+#include "asm/smp.h"
+#include "asm/desc.h"
+#include "choose-mode.h"
+#include "kern.h"
+#include "kern_util.h"
+#include "mode_kern.h"
+#include "os.h"
+#include "mode.h"
+
+#ifdef CONFIG_MODE_SKAS
+#include "skas.h"
+#endif
+
+#ifdef CONFIG_MODE_SKAS
+int do_set_thread_area_skas(struct user_desc *info)
+{
+        int ret;
+        u32 cpu;
+
+        cpu = get_cpu();
+        ret = os_set_thread_area(info, userspace_pid[cpu]);
+        put_cpu();
+        return ret;
+}
+
+int do_get_thread_area_skas(struct user_desc *info)
+{
+        int ret;
+        u32 cpu;
+
+        cpu = get_cpu();
+        ret = os_get_thread_area(info, userspace_pid[cpu]);
+        put_cpu();
+        return ret;
+}
+#endif
+
+/*
+ * get_free_idx: find an as yet unused TLS descriptor index.
+ * XXX: Consider leaving one free slot for glibc usage in the first place. This
+ * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
+ *
+ * Also, this must be tested when compiling in SKAS mode with dynamic linking
+ * and running against NPTL.
+ */
+static int get_free_idx(struct task_struct *task)
+{
+        struct thread_struct *t = &task->thread;
+        int idx;
+
+        if (!t->arch.tls_array)
+                return GDT_ENTRY_TLS_MIN;
+
+        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+                if (!t->arch.tls_array[idx].present)
+                        return idx + GDT_ENTRY_TLS_MIN;
+        return -ESRCH;
+}
+
+#define O_FORCE 1
+
+static inline void clear_user_desc(struct user_desc *info)
+{
+        /* Postcondition: LDT_empty(info) returns true. */
+        memset(info, 0, sizeof(*info));
+
+        /* Compare LDT_empty and the i386 sys_get_thread_area code: this
+         * indeed yields an empty user_desc.
+         */
+        info->read_exec_only = 1;
+        info->seg_not_present = 1;
+}
+
+static int load_TLS(int flags, struct task_struct *to)
+{
+        int ret = 0;
+        int idx;
+
+        for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
+                struct uml_tls_struct *curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+                /* If the entry is not present and not yet flushed, clear it
+                 * here and flush the empty descriptor to the host below. */
+                if (!curr->present) {
+                        if (!curr->flushed) {
+                                clear_user_desc(&curr->tls);
+                                curr->tls.entry_number = idx;
+                        } else {
+                                WARN_ON(!LDT_empty(&curr->tls));
+                                continue;
+                        }
+                }
+
+                if (!(flags & O_FORCE) && curr->flushed)
+                        continue;
+
+                ret = do_set_thread_area(&curr->tls);
+                if (ret)
+                        goto out;
+
+                curr->flushed = 1;
+        }
+out:
+        return ret;
+}
+
+/* Check whether the new process needs a flush, i.e. whether any descriptors
+ * have not yet been flushed to the host.
+ */
+static inline int needs_TLS_update(struct task_struct *task)
+{
+        int i;
+        int ret = 0;
+
+        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+                struct uml_tls_struct *curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+                /* We can't test curr->present alone: we may also need to
+                 * clear a descriptor that previously held a value. */
+                if (curr->flushed)
+                        continue;
+                ret = 1;
+                break;
+        }
+        return ret;
+}
+
+/* On a newly forked process, the TLS descriptors haven't been flushed to the
+ * host yet, so we mark them as unflushed and the first switch_to will do the job.
+ */
+void clear_flushed_tls(struct task_struct *task)
+{
+        int i;
+
+        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+                struct uml_tls_struct *curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+                /* It is still correct to do this: if the entry wasn't present
+                 * on the host, it simply stays as flushed as it was. */
+                if (!curr->present)
+                        continue;
+
+                curr->flushed = 0;
+        }
+}
+
+/* In SKAS0 this does not need to be used, since we have different host
+ * processes.  Nor will this need to be used when we add support for the
+ * host SKAS patch. */
+int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
+{
+        return load_TLS(O_FORCE, to);
+}
+
+int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
+{
+        if (needs_TLS_update(to))
+                return load_TLS(0, to);
+
+        return 0;
+}
+
+static int set_tls_entry(struct task_struct *task, struct user_desc *info,
+                         int idx, int flushed)
+{
+        struct thread_struct *t = &task->thread;
+
+        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+                return -EINVAL;
+
+        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
+        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
+        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
+
+        return 0;
+}
+
+int arch_copy_tls(struct task_struct *new)
+{
+        struct user_desc info;
+        int idx, ret = -EFAULT;
+
+        if (copy_from_user(&info,
+                           (void __user *) UPT_ESI(&new->thread.regs.regs),
+                           sizeof(info)))
+                goto out;
+
+        ret = -EINVAL;
+        if (LDT_empty(&info))
+                goto out;
+
+        idx = info.entry_number;
+
+        ret = set_tls_entry(new, &info, idx, 0);
+out:
+        return ret;
+}
+
+/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
+static int get_tls_entry(struct task_struct *task, struct user_desc *info, int idx)
+{
+        struct thread_struct *t = &task->thread;
+
+        if (!t->arch.tls_array)
+                goto clear;
+
+        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+                return -EINVAL;
+
+        if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
+                goto clear;
+
+        *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
+
+out:
+        /* Temporary debugging check, to make sure that things have been
+         * flushed.  This could be triggered if load_TLS() failed.
+         */
+        if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+                                "without flushed TLS.\n", current->pid);
+        }
+
+        return 0;
+clear:
+        /* When the TLS entry has not been set, the values returned to
+         * userspace are 0, since the GDT is cleared at boot (see
+         * arch/i386/kernel/head.S:cpu_gdt_table).  Emulate that.
+         */
+        clear_user_desc(info);
+        info->entry_number = idx;
+        goto out;
+}
+
+asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
+{
+        struct user_desc info;
+        int idx, ret;
+
+        if (copy_from_user(&info, user_desc, sizeof(info)))
+                return -EFAULT;
+
+        idx = info.entry_number;
+
+        if (idx == -1) {
+                idx = get_free_idx(current);
+                if (idx < 0)
+                        return idx;
+                info.entry_number = idx;
+                /* Tell the user which slot we chose for him. */
+                if (put_user(idx, &user_desc->entry_number))
+                        return -EFAULT;
+        }
+
+        ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info);
+        if (ret)
+                return ret;
+        return set_tls_entry(current, &info, idx, 1);
+}
+
+/*
+ * Perform set_thread_area on behalf of the traced child.
+ * Note: error handling is not done on the deferred load, and this differs
+ * from i386.  However, the only possible errors are caused by bugs.
+ */
+int ptrace_set_thread_area(struct task_struct *child, int idx,
+                           struct user_desc __user *user_desc)
+{
+        struct user_desc info;
+
+        if (copy_from_user(&info, user_desc, sizeof(info)))
+                return -EFAULT;
+
+        return set_tls_entry(child, &info, idx, 0);
+}
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
+{
+        struct user_desc info;
+        int idx, ret;
+
+        if (get_user(idx, &user_desc->entry_number))
+                return -EFAULT;
+
+        ret = get_tls_entry(current, &info, idx);
+        if (ret < 0)
+                goto out;
+
+        if (copy_to_user(user_desc, &info, sizeof(info)))
+                ret = -EFAULT;
+
+out:
+        return ret;
+}
+
+/*
+ * Perform get_thread_area on behalf of the traced child.
+ */
+int ptrace_get_thread_area(struct task_struct *child, int idx,
+                           struct user_desc __user *user_desc)
+{
+        struct user_desc info;
+        int ret;
+
+        ret = get_tls_entry(child, &info, idx);
+        if (ret < 0)
+                goto out;
+
+        if (copy_to_user(user_desc, &info, sizeof(info)))
+                ret = -EFAULT;
+out:
+        return ret;
+}
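Taken together: sys_set_thread_area() with entry_number == -1 allocates a slot
through get_free_idx() and reports it back via put_user(), and
sys_get_thread_area() on a never-set slot returns the cleared descriptor
fabricated by clear_user_desc().  A hypothetical userspace check of both
behaviours on an i386 build (not part of the patch):

/* Hypothetical test: allocate a TLS slot, then read the entry back. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

static char area[4096];

int main(void)
{
        struct user_desc set, get;

        memset(&set, 0, sizeof(set));
        set.entry_number = -1;                  /* ask for a free slot */
        set.base_addr = (unsigned long) area;
        set.limit = sizeof(area) - 1;
        set.seg_32bit = 1;
        set.useable = 1;

        if (syscall(SYS_set_thread_area, &set) < 0) {
                perror("set_thread_area");
                return 1;
        }
        /* The kernel wrote the chosen GDT slot back into entry_number. */
        printf("allocated GDT slot %u\n", set.entry_number);

        memset(&get, 0, sizeof(get));
        get.entry_number = set.entry_number;
        if (syscall(SYS_get_thread_area, &get) < 0) {
                perror("get_thread_area");
                return 1;
        }
        printf("base read back: %#lx\n", (unsigned long) get.base_addr);
        return 0;
}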