Diffstat (limited to 'arch/um/sys-i386/ldt.c')
-rw-r--r--   arch/um/sys-i386/ldt.c   506
1 file changed, 471 insertions(+), 35 deletions(-)
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 36b5c2c13289..6360f1c958d0 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,53 +3,26 @@
  * Licensed under the GPL
  */
 
+#include "linux/stddef.h"
 #include "linux/config.h"
 #include "linux/sched.h"
 #include "linux/slab.h"
 #include "linux/types.h"
+#include "linux/errno.h"
 #include "asm/uaccess.h"
-#include "asm/ptrace.h"
 #include "asm/smp.h"
 #include "asm/ldt.h"
+#include "asm/unistd.h"
 #include "choose-mode.h"
 #include "kern.h"
 #include "mode_kern.h"
 
-#ifdef CONFIG_MODE_TT
-
 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
 
-static int do_modify_ldt_tt(int func, void *ptr, unsigned long bytecount)
-{
-        return modify_ldt(func, ptr, bytecount);
-}
-
-#endif
-
-#ifdef CONFIG_MODE_SKAS
-
-#include "skas.h"
-#include "skas_ptrace.h"
-
-static int do_modify_ldt_skas(int func, void *ptr, unsigned long bytecount)
-{
-        struct ptrace_ldt ldt;
-        u32 cpu;
-        int res;
-
-        ldt = ((struct ptrace_ldt) { .func = func,
-                                     .ptr = ptr,
-                                     .bytecount = bytecount });
-
-        cpu = get_cpu();
-        res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0, (unsigned long) &ldt);
-        put_cpu();
-
-        return res;
-}
-#endif
+#ifdef CONFIG_MODE_TT
 
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+static long do_modify_ldt_tt(int func, void __user *ptr,
+                             unsigned long bytecount)
 {
         struct user_desc info;
         int res = 0;
@@ -89,8 +62,7 @@ int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
                 goto out;
         }
 
-        res = CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
-                               p, bytecount);
+        res = modify_ldt(func, p, bytecount);
         if(res < 0)
                 goto out;
 
@@ -108,3 +80,467 @@ out:
         kfree(buf);
         return res;
 }
+
+#endif
+
+#ifdef CONFIG_MODE_SKAS
+
+#include "skas.h"
+#include "skas_ptrace.h"
+#include "asm/mmu_context.h"
+
+long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
+                     void **addr, int done)
+{
+        long res;
+
+        if(proc_mm){
+                /* This is a special handling for the case, that the mm to
+                 * modify isn't current->active_mm.
+                 * If this is called directly by modify_ldt,
+                 * (current->active_mm->context.skas.u == mm_idp)
+                 * will be true. So no call to switch_mm_skas(mm_idp) is done.
+                 * If this is called in case of init_new_ldt or PTRACE_LDT,
+                 * mm_idp won't belong to current->active_mm, but child->mm.
+                 * So we need to switch child's mm into our userspace, then
+                 * later switch back.
+                 *
+                 * Note: I'm unsure: should interrupts be disabled here?
+                 */
+                if(!current->active_mm || current->active_mm == &init_mm ||
+                   mm_idp != &current->active_mm->context.skas.id)
+                        switch_mm_skas(mm_idp);
+        }
+
+        if(ptrace_ldt) {
+                struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
+                        .func = func,
+                        .ptr = desc,
+                        .bytecount = sizeof(*desc)};
+                u32 cpu;
+                int pid;
+
+                if(!proc_mm)
+                        pid = mm_idp->u.pid;
+                else {
+                        cpu = get_cpu();
+                        pid = userspace_pid[cpu];
+                }
+
+                res = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
+                if(res)
+                        res = errno;
+
+                if(proc_mm)
+                        put_cpu();
+        }
+        else {
+                void *stub_addr;
+                res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+                                        (sizeof(*desc) + sizeof(long) - 1) &
+                                        ~(sizeof(long) - 1),
+                                        addr, &stub_addr);
+                if(!res){
+                        unsigned long args[] = { func,
+                                                 (unsigned long)stub_addr,
+                                                 sizeof(*desc),
+                                                 0, 0, 0 };
+                        res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+                                               0, addr, done);
+                }
+        }
+
+        if(proc_mm){
+                /* This is the second part of special handling, that makes
+                 * PTRACE_LDT possible to implement.
+                 */
+                if(current->active_mm && current->active_mm != &init_mm &&
+                   mm_idp != &current->active_mm->context.skas.id)
+                        switch_mm_skas(&current->active_mm->context.skas.id);
+        }
+
+        return res;
+}
+
+static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
+{
+        int res, n;
+        struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
+                        .func = 0,
+                        .bytecount = bytecount,
+                        .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)};
+        u32 cpu;
+
+        if(ptrace_ldt.ptr == NULL)
+                return -ENOMEM;
+
+        /* This is called from sys_modify_ldt only, so userspace_pid gives
+         * us the right number
+         */
+
+        cpu = get_cpu();
+        res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0,
+                     (unsigned long) &ptrace_ldt);
+        put_cpu();
+        if(res < 0)
+                goto out;
+
+        n = copy_to_user(ptr, ptrace_ldt.ptr, res);
+        if(n != 0)
+                res = -EFAULT;
+
+out:
+        kfree(ptrace_ldt.ptr);
+
+        return res;
+}
+
+/*
+ * In skas mode, we hold our own ldt data in UML.
+ * Thus, the code implementing sys_modify_ldt_skas
+ * is very similar to (and mostly stolen from) sys_modify_ldt
+ * for arch/i386/kernel/ldt.c
+ * The routines copied and modified in part are:
+ *      - read_ldt
+ *      - read_default_ldt
+ *      - write_ldt
+ *      - sys_modify_ldt_skas
+ */
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+        int i, err = 0;
+        unsigned long size;
+        uml_ldt_t * ldt = &current->mm->context.skas.ldt;
+
+        if(!ldt->entry_count)
+                goto out;
+        if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+                bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+        err = bytecount;
+
+        if(ptrace_ldt){
+                return read_ldt_from_host(ptr, bytecount);
+        }
+
+        down(&ldt->semaphore);
+        if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
+                size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
+                if(size > bytecount)
+                        size = bytecount;
+                if(copy_to_user(ptr, ldt->entries, size))
+                        err = -EFAULT;
+                bytecount -= size;
+                ptr += size;
+        }
+        else {
+                for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
+                    i++){
+                        size = PAGE_SIZE;
+                        if(size > bytecount)
+                                size = bytecount;
+                        if(copy_to_user(ptr, ldt->pages[i], size)){
+                                err = -EFAULT;
+                                break;
+                        }
+                        bytecount -= size;
+                        ptr += size;
+                }
+        }
+        up(&ldt->semaphore);
+
+        if(bytecount == 0 || err == -EFAULT)
+                goto out;
+
+        if(clear_user(ptr, bytecount))
+                err = -EFAULT;
+
+out:
+        return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+        int err;
+
+        if(bytecount > 5*LDT_ENTRY_SIZE)
+                bytecount = 5*LDT_ENTRY_SIZE;
+
+        err = bytecount;
+        /* UML doesn't support lcall7 and lcall27.
+         * So, we don't really have a default ldt, but emulate
+         * an empty ldt of common host default ldt size.
+         */
+        if(clear_user(ptr, bytecount))
+                err = -EFAULT;
+
+        return err;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
+{
+        uml_ldt_t * ldt = &current->mm->context.skas.ldt;
+        struct mm_id * mm_idp = &current->mm->context.skas.id;
+        int i, err;
+        struct user_desc ldt_info;
+        struct ldt_entry entry0, *ldt_p;
+        void *addr = NULL;
+
+        err = -EINVAL;
+        if(bytecount != sizeof(ldt_info))
+                goto out;
+        err = -EFAULT;
+        if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+                goto out;
+
+        err = -EINVAL;
+        if(ldt_info.entry_number >= LDT_ENTRIES)
+                goto out;
+        if(ldt_info.contents == 3){
+                if (func == 1)
+                        goto out;
+                if (ldt_info.seg_not_present == 0)
+                        goto out;
+        }
+
+        if(!ptrace_ldt)
+                down(&ldt->semaphore);
+
+        err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
+        if(err)
+                goto out_unlock;
+        else if(ptrace_ldt) {
+                /* With PTRACE_LDT available, this is used as a flag only */
+                ldt->entry_count = 1;
+                goto out;
+        }
+
+        if(ldt_info.entry_number >= ldt->entry_count &&
+           ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
+                for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
+                    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
+                    i++){
+                        if(i == 0)
+                                memcpy(&entry0, ldt->entries, sizeof(entry0));
+                        ldt->pages[i] = (struct ldt_entry *)
+                                __get_free_page(GFP_KERNEL|__GFP_ZERO);
+                        if(!ldt->pages[i]){
+                                err = -ENOMEM;
+                                /* Undo the change in host */
+                                memset(&ldt_info, 0, sizeof(ldt_info));
+                                write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
+                                goto out_unlock;
+                        }
+                        if(i == 0) {
+                                memcpy(ldt->pages[0], &entry0, sizeof(entry0));
+                                memcpy(ldt->pages[0]+1, ldt->entries+1,
+                                       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
+                        }
+                        ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
+                }
+        }
+        if(ldt->entry_count <= ldt_info.entry_number)
+                ldt->entry_count = ldt_info.entry_number + 1;
+
+        if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
+                ldt_p = ldt->entries + ldt_info.entry_number;
+        else
+                ldt_p = ldt->pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
+                        ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
+
+        if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
+           (func == 1 || LDT_empty(&ldt_info))){
+                ldt_p->a = 0;
+                ldt_p->b = 0;
+        }
+        else{
+                if (func == 1)
+                        ldt_info.useable = 0;
+                ldt_p->a = LDT_entry_a(&ldt_info);
+                ldt_p->b = LDT_entry_b(&ldt_info);
+        }
+        err = 0;
+
+out_unlock:
+        up(&ldt->semaphore);
+out:
+        return err;
+}
+
+static long do_modify_ldt_skas(int func, void __user *ptr,
+                               unsigned long bytecount)
+{
+        int ret = -ENOSYS;
+
+        switch (func) {
+        case 0:
+                ret = read_ldt(ptr, bytecount);
+                break;
+        case 1:
+        case 0x11:
+                ret = write_ldt(ptr, bytecount, func);
+                break;
+        case 2:
+                ret = read_default_ldt(ptr, bytecount);
+                break;
+        }
+        return ret;
+}
+
+short dummy_list[9] = {0, -1};
+short * host_ldt_entries = NULL;
+
+void ldt_get_host_info(void)
+{
+        long ret;
+        struct ldt_entry * ldt;
+        int i, size, k, order;
+
+        host_ldt_entries = dummy_list+1;
+
+        for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
+
+        ldt = (struct ldt_entry *)
+              __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
+        if(ldt == NULL) {
+                printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
+                return;
+        }
+
+        ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
+        if(ret < 0) {
+                printk("ldt_get_host_info: couldn't read host ldt\n");
+                goto out_free;
+        }
+        if(ret == 0) {
+                /* default_ldt is active, simply write an empty entry 0 */
+                host_ldt_entries = dummy_list;
+                goto out_free;
+        }
+
+        for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
+                if(ldt[i].a != 0 || ldt[i].b != 0)
+                        size++;
+        }
+
+        if(size < sizeof(dummy_list)/sizeof(dummy_list[0])) {
+                host_ldt_entries = dummy_list;
+        }
+        else {
+                size = (size + 1) * sizeof(dummy_list[0]);
+                host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL);
+                if(host_ldt_entries == NULL) {
+                        printk("ldt_get_host_info: couldn't allocate host ldt list\n");
+                        goto out_free;
+                }
+        }
+
+        for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
+                if(ldt[i].a != 0 || ldt[i].b != 0) {
+                        host_ldt_entries[k++] = i;
+                }
+        }
+        host_ldt_entries[k] = -1;
+
+out_free:
+        free_pages((unsigned long)ldt, order);
+}
+
+long init_new_ldt(struct mmu_context_skas * new_mm,
+                  struct mmu_context_skas * from_mm)
+{
+        struct user_desc desc;
+        short * num_p;
+        int i;
+        long page, err=0;
+        void *addr = NULL;
+
+        memset(&desc, 0, sizeof(desc));
+
+        if(!ptrace_ldt)
+                init_MUTEX(&new_mm->ldt.semaphore);
+
+        if(!from_mm){
+                /*
+                 * We have to initialize a clean ldt.
+                 */
+                if(proc_mm) {
+                        /*
+                         * If the new mm was created using proc_mm, host's
+                         * default-ldt currently is assigned, which normally
+                         * contains the call-gates for lcall7 and lcall27.
+                         * To remove these gates, we simply write an empty
+                         * entry as number 0 to the host.
+                         */
+                        err = write_ldt_entry(&new_mm->id, 1, &desc,
+                                              &addr, 1);
+                }
+                else{
+                        /*
+                         * Now we try to retrieve info about the ldt, we
+                         * inherited from the host. All ldt-entries found
+                         * will be reset in the following loop
+                         */
+                        if(host_ldt_entries == NULL)
+                                ldt_get_host_info();
+                        for(num_p=host_ldt_entries; *num_p != -1; num_p++){
+                                desc.entry_number = *num_p;
+                                err = write_ldt_entry(&new_mm->id, 1, &desc,
+                                                      &addr, *(num_p + 1) == -1);
+                                if(err)
+                                        break;
+                        }
+                }
+                new_mm->ldt.entry_count = 0;
+        }
+        else if (!ptrace_ldt) {
+                /* Our local LDT is used to supply the data for
+                 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
+                 * i.e., we have to use the stub for modify_ldt, which
+                 * can't handle the big read buffer of up to 64kB.
+                 */
+                down(&from_mm->ldt.semaphore);
+                if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
+                        memcpy(new_mm->ldt.entries, from_mm->ldt.entries,
+                               sizeof(new_mm->ldt.entries));
+                }
+                else{
+                        i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+                        while(i-->0){
+                                page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+                                if (!page){
+                                        err = -ENOMEM;
+                                        break;
+                                }
+                                new_mm->ldt.pages[i] = (struct ldt_entry*)page;
+                                memcpy(new_mm->ldt.pages[i],
+                                       from_mm->ldt.pages[i], PAGE_SIZE);
+                        }
+                }
+                new_mm->ldt.entry_count = from_mm->ldt.entry_count;
+                up(&from_mm->ldt.semaphore);
+        }
+
+        return err;
+}
+
+
+void free_ldt(struct mmu_context_skas * mm)
+{
+        int i;
+
+        if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
+                i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+                while(i-- > 0){
+                        free_page((long )mm->ldt.pages[i]);
+                }
+        }
+        mm->ldt.entry_count = 0;
+}
+#endif
+
+int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+        return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
+                                ptr, bytecount));
+}
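
For reference, the new do_modify_ldt_skas() above dispatches on the same func values as the native i386 modify_ldt interface: 0 reads the LDT, 1 and 0x11 write one entry (old and new descriptor format), and 2 reads the default LDT. Below is a minimal userspace sketch of that interface; it is illustrative only and not part of this patch, and the entry field values and buffer size are arbitrary choices for demonstration.

/* Illustrative only: exercise the modify_ldt() func values handled above.
 * Build on Linux/x86; entry values and buffer size are arbitrary.
 */
#include <asm/ldt.h>        /* struct user_desc, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc desc;
        unsigned char buf[LDT_ENTRY_SIZE * 16];
        long n;

        /* func == 0x11: write entry 0; these field values satisfy the
         * kernel's "empty entry" check, so a null descriptor is installed. */
        memset(&desc, 0, sizeof(desc));
        desc.entry_number = 0;
        desc.read_exec_only = 1;
        desc.seg_not_present = 1;
        if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0)
                perror("modify_ldt(write)");

        /* func == 0: read the LDT back; the return value is the number of
         * bytes placed in the buffer (0 if no entries are set). */
        n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
        printf("modify_ldt(read) returned %ld\n", n);
        return 0;
}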