aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBodo Stroesser <bstroesser@fujitsu-siemens.com>2005-11-07 03:58:55 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:31 -0500
commit858259cf7d1c443c836a2022b78cb281f0a9b95e (patch)
tree7d306450dd0dfa907bbee1d95f96191c67f74232
parente763b793f7e5c09a859fc420eb0de385d80cf636 (diff)
[PATCH] uml: maintain own LDT entries
Patch implements full LDT handling in SKAS: * UML holds its own LDT table, used to deliver data on modify_ldt(READ) * UML disables the default_ldt, inherited from the host (SKAS3) or resets LDT entries, set by host's clib and inherited in SKAS0 * A new global variable skas_needs_stub is inserted, that can be used to decide, whether stub-pages must be supported or not. * Uses the syscall-stub to replace missing PTRACE_LDT (therefore, write_ldt_entry needs to be modified) Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com> Signed-off-by: Jeff Dike <jdike@addtoit.com> Cc: Paolo Giarrusso <blaisorblade@yahoo.it> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--arch/um/kernel/skas/include/mmu-skas.h2
-rw-r--r--arch/um/kernel/skas/include/skas.h3
-rw-r--r--arch/um/kernel/skas/mem.c2
-rw-r--r--arch/um/kernel/skas/mmu.c44
-rw-r--r--arch/um/kernel/skas/process.c6
-rw-r--r--arch/um/kernel/skas/process_kern.c2
-rw-r--r--arch/um/os-Linux/start_up.c75
-rw-r--r--arch/um/scripts/Makefile.rules7
-rw-r--r--arch/um/sys-i386/ldt.c506
-rw-r--r--arch/um/sys-x86_64/Makefile5
-rw-r--r--arch/um/sys-x86_64/syscalls.c75
-rw-r--r--include/asm-um/ldt-i386.h69
-rw-r--r--include/asm-um/ldt.h69
13 files changed, 724 insertions, 141 deletions
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 09536f81ee42..44110c521e49 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -8,6 +8,7 @@
8 8
9#include "linux/config.h" 9#include "linux/config.h"
10#include "mm_id.h" 10#include "mm_id.h"
11#include "asm/ldt.h"
11 12
12struct mmu_context_skas { 13struct mmu_context_skas {
13 struct mm_id id; 14 struct mm_id id;
@@ -15,6 +16,7 @@ struct mmu_context_skas {
15#ifdef CONFIG_3_LEVEL_PGTABLES 16#ifdef CONFIG_3_LEVEL_PGTABLES
16 unsigned long last_pmd; 17 unsigned long last_pmd;
17#endif 18#endif
19 uml_ldt_t ldt;
18}; 20};
19 21
20extern void switch_mm_skas(struct mm_id * mm_idp); 22extern void switch_mm_skas(struct mm_id * mm_idp);
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index 060934740f9f..daa2f85b684c 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -10,7 +10,8 @@
10#include "sysdep/ptrace.h" 10#include "sysdep/ptrace.h"
11 11
12extern int userspace_pid[]; 12extern int userspace_pid[];
13extern int proc_mm, ptrace_faultinfo; 13extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
14extern int skas_needs_stub;
14 15
15extern void switch_threads(void *me, void *next); 16extern void switch_threads(void *me, void *next);
16extern void thread_wait(void *sw, void *fb); 17extern void thread_wait(void *sw, void *fb);
diff --git a/arch/um/kernel/skas/mem.c b/arch/um/kernel/skas/mem.c
index 147466d7ff4f..88ab96c609ce 100644
--- a/arch/um/kernel/skas/mem.c
+++ b/arch/um/kernel/skas/mem.c
@@ -20,7 +20,7 @@ unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out,
20 *task_size_out = CONFIG_HOST_TASK_SIZE; 20 *task_size_out = CONFIG_HOST_TASK_SIZE;
21#else 21#else
22 *host_size_out = top; 22 *host_size_out = top;
23 if (proc_mm && ptrace_faultinfo) 23 if (!skas_needs_stub)
24 *task_size_out = top; 24 *task_size_out = top;
25 else *task_size_out = CONFIG_STUB_START & PGDIR_MASK; 25 else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
26#endif 26#endif
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 9e5e39cea821..677871f1b37c 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -15,6 +15,7 @@
15#include "asm/mmu.h" 15#include "asm/mmu.h"
16#include "asm/pgalloc.h" 16#include "asm/pgalloc.h"
17#include "asm/pgtable.h" 17#include "asm/pgtable.h"
18#include "asm/ldt.h"
18#include "os.h" 19#include "os.h"
19#include "skas.h" 20#include "skas.h"
20 21
@@ -74,13 +75,12 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
74 75
75int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) 76int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
76{ 77{
77 struct mm_struct *cur_mm = current->mm; 78 struct mmu_context_skas *from_mm = NULL;
78 struct mm_id *cur_mm_id = &cur_mm->context.skas.id; 79 struct mmu_context_skas *to_mm = &mm->context.skas;
79 struct mm_id *mm_id = &mm->context.skas.id;
80 unsigned long stack = 0; 80 unsigned long stack = 0;
81 int from, ret = -ENOMEM; 81 int from_fd, ret = -ENOMEM;
82 82
83 if(!proc_mm || !ptrace_faultinfo){ 83 if(skas_needs_stub){
84 stack = get_zeroed_page(GFP_KERNEL); 84 stack = get_zeroed_page(GFP_KERNEL);
85 if(stack == 0) 85 if(stack == 0)
86 goto out; 86 goto out;
@@ -102,33 +102,43 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
102 102
103 mm->nr_ptes--; 103 mm->nr_ptes--;
104 } 104 }
105 mm_id->stack = stack; 105
106 to_mm->id.stack = stack;
107 if(current->mm != NULL && current->mm != &init_mm)
108 from_mm = &current->mm->context.skas;
106 109
107 if(proc_mm){ 110 if(proc_mm){
108 if((cur_mm != NULL) && (cur_mm != &init_mm)) 111 if(from_mm)
109 from = cur_mm_id->u.mm_fd; 112 from_fd = from_mm->id.u.mm_fd;
110 else from = -1; 113 else from_fd = -1;
111 114
112 ret = new_mm(from, stack); 115 ret = new_mm(from_fd, stack);
113 if(ret < 0){ 116 if(ret < 0){
114 printk("init_new_context_skas - new_mm failed, " 117 printk("init_new_context_skas - new_mm failed, "
115 "errno = %d\n", ret); 118 "errno = %d\n", ret);
116 goto out_free; 119 goto out_free;
117 } 120 }
118 mm_id->u.mm_fd = ret; 121 to_mm->id.u.mm_fd = ret;
119 } 122 }
120 else { 123 else {
121 if((cur_mm != NULL) && (cur_mm != &init_mm)) 124 if(from_mm)
122 mm_id->u.pid = copy_context_skas0(stack, 125 to_mm->id.u.pid = copy_context_skas0(stack,
123 cur_mm_id->u.pid); 126 from_mm->id.u.pid);
124 else mm_id->u.pid = start_userspace(stack); 127 else to_mm->id.u.pid = start_userspace(stack);
128 }
129
130 ret = init_new_ldt(to_mm, from_mm);
131 if(ret < 0){
132 printk("init_new_context_skas - init_ldt"
133 " failed, errno = %d\n", ret);
134 goto out_free;
125 } 135 }
126 136
127 return 0; 137 return 0;
128 138
129 out_free: 139 out_free:
130 if(mm_id->stack != 0) 140 if(to_mm->id.stack != 0)
131 free_page(mm_id->stack); 141 free_page(to_mm->id.stack);
132 out: 142 out:
133 return ret; 143 return ret;
134} 144}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 42f2da687dc8..599d679bd4fc 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -381,9 +381,9 @@ int copy_context_skas0(unsigned long new_stack, int pid)
381} 381}
382 382
383/* 383/*
384 * This is used only, if proc_mm is available, while PTRACE_FAULTINFO 384 * This is used only, if stub pages are needed, while proc_mm is
385 * isn't. Opening /proc/mm creates a new mm_context, which lacks the stub-pages 385 * available. Opening /proc/mm creates a new mm_context, which lacks
386 * Thus, we map them using /proc/mm-fd 386 * the stub-pages. Thus, we map them using /proc/mm-fd
387 */ 387 */
388void map_stub_pages(int fd, unsigned long code, 388void map_stub_pages(int fd, unsigned long code,
389 unsigned long data, unsigned long stack) 389 unsigned long data, unsigned long stack)
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index efe92e8aa2a9..9c990253966c 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -145,7 +145,7 @@ int new_mm(int from, unsigned long stack)
145 "err = %d\n", -n); 145 "err = %d\n", -n);
146 } 146 }
147 147
148 if(!ptrace_faultinfo) 148 if(skas_needs_stub)
149 map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack); 149 map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
150 150
151 return(fd); 151 return(fd);
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index b99ab414542f..553a09c7d0bc 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -135,7 +135,9 @@ static int stop_ptraced_child(int pid, void *stack, int exitcode,
135} 135}
136 136
137int ptrace_faultinfo = 1; 137int ptrace_faultinfo = 1;
138int ptrace_ldt = 1;
138int proc_mm = 1; 139int proc_mm = 1;
140int skas_needs_stub = 0;
139 141
140static int __init skas0_cmd_param(char *str, int* add) 142static int __init skas0_cmd_param(char *str, int* add)
141{ 143{
@@ -352,14 +354,26 @@ __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
352" it. To support PTRACE_FAULTINFO, the host needs to be patched\n" 354" it. To support PTRACE_FAULTINFO, the host needs to be patched\n"
353" using the current skas3 patch.\n\n"); 355" using the current skas3 patch.\n\n");
354 356
357static int __init noptraceldt_cmd_param(char *str, int* add)
358{
359 ptrace_ldt = 0;
360 return 0;
361}
362
363__uml_setup("noptraceldt", noptraceldt_cmd_param,
364"noptraceldt\n"
365" Turns off usage of PTRACE_LDT, even if host supports it.\n"
366" To support PTRACE_LDT, the host needs to be patched using\n"
367" the current skas3 patch.\n\n");
368
355#ifdef UML_CONFIG_MODE_SKAS 369#ifdef UML_CONFIG_MODE_SKAS
356static inline void check_skas3_ptrace_support(void) 370static inline void check_skas3_ptrace_faultinfo(void)
357{ 371{
358 struct ptrace_faultinfo fi; 372 struct ptrace_faultinfo fi;
359 void *stack; 373 void *stack;
360 int pid, n; 374 int pid, n;
361 375
362 printf("Checking for the skas3 patch in the host..."); 376 printf(" - PTRACE_FAULTINFO...");
363 pid = start_ptraced_child(&stack); 377 pid = start_ptraced_child(&stack);
364 378
365 n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi); 379 n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
@@ -381,9 +395,49 @@ static inline void check_skas3_ptrace_support(void)
381 stop_ptraced_child(pid, stack, 1, 1); 395 stop_ptraced_child(pid, stack, 1, 1);
382} 396}
383 397
384int can_do_skas(void) 398static inline void check_skas3_ptrace_ldt(void)
399{
400#ifdef PTRACE_LDT
401 void *stack;
402 int pid, n;
403 unsigned char ldtbuf[40];
404 struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
405 .func = 2, /* read default ldt */
406 .ptr = ldtbuf,
407 .bytecount = sizeof(ldtbuf)};
408
409 printf(" - PTRACE_LDT...");
410 pid = start_ptraced_child(&stack);
411
412 n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
413 if (n < 0) {
414 if(errno == EIO)
415 printf("not found\n");
416 else {
417 perror("not found");
418 }
419 ptrace_ldt = 0;
420 }
421 else {
422 if(ptrace_ldt)
423 printf("found\n");
424 else
425 printf("found, but use is disabled\n");
426 }
427
428 stop_ptraced_child(pid, stack, 1, 1);
429#else
430 /* PTRACE_LDT might be disabled via cmdline option.
431 * We want to override this, else we might use the stub
432 * without real need
433 */
434 ptrace_ldt = 1;
435#endif
436}
437
438static inline void check_skas3_proc_mm(void)
385{ 439{
386 printf("Checking for /proc/mm..."); 440 printf(" - /proc/mm...");
387 if (os_access("/proc/mm", OS_ACC_W_OK) < 0) { 441 if (os_access("/proc/mm", OS_ACC_W_OK) < 0) {
388 proc_mm = 0; 442 proc_mm = 0;
389 printf("not found\n"); 443 printf("not found\n");
@@ -394,8 +448,19 @@ int can_do_skas(void)
394 else 448 else
395 printf("found\n"); 449 printf("found\n");
396 } 450 }
451}
452
453int can_do_skas(void)
454{
455 printf("Checking for the skas3 patch in the host:\n");
456
457 check_skas3_proc_mm();
458 check_skas3_ptrace_faultinfo();
459 check_skas3_ptrace_ldt();
460
461 if(!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
462 skas_needs_stub = 1;
397 463
398 check_skas3_ptrace_support();
399 return 1; 464 return 1;
400} 465}
401#else 466#else
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index 651d9d88b656..b3fbf125709b 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -26,8 +26,13 @@ define unprofile
26 $(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1))) 26 $(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1)))
27endef 27endef
28 28
29# cmd_make_link checks to see if the $(foo-dir) variable starts with a /. If
30# so, it's considered to be a path relative to $(srcdir) rather than
31# $(srcdir)/arch/$(SUBARCH). This is because x86_64 wants to get ldt.c from
32# arch/um/sys-i386 rather than arch/i386 like the other borrowed files. So,
33# it sets $(ldt.c-dir) to /arch/um/sys-i386.
29quiet_cmd_make_link = SYMLINK $@ 34quiet_cmd_make_link = SYMLINK $@
30cmd_make_link = ln -sf $(srctree)/arch/$(SUBARCH)/$($(notdir $@)-dir)/$(notdir $@) $@ 35cmd_make_link = rm -f $@; ln -sf $(srctree)$(if $(filter-out /%,$($(notdir $@)-dir)),/arch/$(SUBARCH))/$($(notdir $@)-dir)/$(notdir $@) $@
31 36
32# this needs to be before the foreach, because targets does not accept 37# this needs to be before the foreach, because targets does not accept
33# complete paths like $(obj)/$(f). To make sure this works, use a := assignment 38# complete paths like $(obj)/$(f). To make sure this works, use a := assignment
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 36b5c2c13289..6360f1c958d0 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,53 +3,26 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/stddef.h"
6#include "linux/config.h" 7#include "linux/config.h"
7#include "linux/sched.h" 8#include "linux/sched.h"
8#include "linux/slab.h" 9#include "linux/slab.h"
9#include "linux/types.h" 10#include "linux/types.h"
11#include "linux/errno.h"
10#include "asm/uaccess.h" 12#include "asm/uaccess.h"
11#include "asm/ptrace.h"
12#include "asm/smp.h" 13#include "asm/smp.h"
13#include "asm/ldt.h" 14#include "asm/ldt.h"
15#include "asm/unistd.h"
14#include "choose-mode.h" 16#include "choose-mode.h"
15#include "kern.h" 17#include "kern.h"
16#include "mode_kern.h" 18#include "mode_kern.h"
17 19
18#ifdef CONFIG_MODE_TT
19
20extern int modify_ldt(int func, void *ptr, unsigned long bytecount); 20extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
21 21
22static int do_modify_ldt_tt(int func, void *ptr, unsigned long bytecount) 22#ifdef CONFIG_MODE_TT
23{
24 return modify_ldt(func, ptr, bytecount);
25}
26
27#endif
28
29#ifdef CONFIG_MODE_SKAS
30
31#include "skas.h"
32#include "skas_ptrace.h"
33
34static int do_modify_ldt_skas(int func, void *ptr, unsigned long bytecount)
35{
36 struct ptrace_ldt ldt;
37 u32 cpu;
38 int res;
39
40 ldt = ((struct ptrace_ldt) { .func = func,
41 .ptr = ptr,
42 .bytecount = bytecount });
43
44 cpu = get_cpu();
45 res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0, (unsigned long) &ldt);
46 put_cpu();
47
48 return res;
49}
50#endif
51 23
52int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) 24static long do_modify_ldt_tt(int func, void __user *ptr,
25 unsigned long bytecount)
53{ 26{
54 struct user_desc info; 27 struct user_desc info;
55 int res = 0; 28 int res = 0;
@@ -89,8 +62,7 @@ int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
89 goto out; 62 goto out;
90 } 63 }
91 64
92 res = CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func, 65 res = modify_ldt(func, p, bytecount);
93 p, bytecount);
94 if(res < 0) 66 if(res < 0)
95 goto out; 67 goto out;
96 68
@@ -108,3 +80,467 @@ out:
108 kfree(buf); 80 kfree(buf);
109 return res; 81 return res;
110} 82}
83
84#endif
85
86#ifdef CONFIG_MODE_SKAS
87
88#include "skas.h"
89#include "skas_ptrace.h"
90#include "asm/mmu_context.h"
91
92long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
93 void **addr, int done)
94{
95 long res;
96
97 if(proc_mm){
98 /* This is a special handling for the case, that the mm to
99 * modify isn't current->active_mm.
100 * If this is called directly by modify_ldt,
101 * (current->active_mm->context.skas.u == mm_idp)
102 * will be true. So no call to switch_mm_skas(mm_idp) is done.
103 * If this is called in case of init_new_ldt or PTRACE_LDT,
104 * mm_idp won't belong to current->active_mm, but child->mm.
105 * So we need to switch child's mm into our userspace, then
106 * later switch back.
107 *
108 * Note: I'm unsure: should interrupts be disabled here?
109 */
110 if(!current->active_mm || current->active_mm == &init_mm ||
111 mm_idp != &current->active_mm->context.skas.id)
112 switch_mm_skas(mm_idp);
113 }
114
115 if(ptrace_ldt) {
116 struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
117 .func = func,
118 .ptr = desc,
119 .bytecount = sizeof(*desc)};
120 u32 cpu;
121 int pid;
122
123 if(!proc_mm)
124 pid = mm_idp->u.pid;
125 else {
126 cpu = get_cpu();
127 pid = userspace_pid[cpu];
128 }
129
130 res = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
131 if(res)
132 res = errno;
133
134 if(proc_mm)
135 put_cpu();
136 }
137 else {
138 void *stub_addr;
139 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
140 (sizeof(*desc) + sizeof(long) - 1) &
141 ~(sizeof(long) - 1),
142 addr, &stub_addr);
143 if(!res){
144 unsigned long args[] = { func,
145 (unsigned long)stub_addr,
146 sizeof(*desc),
147 0, 0, 0 };
148 res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
149 0, addr, done);
150 }
151 }
152
153 if(proc_mm){
154 /* This is the second part of special handling, that makes
155 * PTRACE_LDT possible to implement.
156 */
157 if(current->active_mm && current->active_mm != &init_mm &&
158 mm_idp != &current->active_mm->context.skas.id)
159 switch_mm_skas(&current->active_mm->context.skas.id);
160 }
161
162 return res;
163}
164
165static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
166{
167 int res, n;
168 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
169 .func = 0,
170 .bytecount = bytecount,
171 .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)};
172 u32 cpu;
173
174 if(ptrace_ldt.ptr == NULL)
175 return -ENOMEM;
176
177 /* This is called from sys_modify_ldt only, so userspace_pid gives
178 * us the right number
179 */
180
181 cpu = get_cpu();
182 res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0,
183 (unsigned long) &ptrace_ldt);
184 put_cpu();
185 if(res < 0)
186 goto out;
187
188 n = copy_to_user(ptr, ptrace_ldt.ptr, res);
189 if(n != 0)
190 res = -EFAULT;
191
192 out:
193 kfree(ptrace_ldt.ptr);
194
195 return res;
196}
197
198/*
199 * In skas mode, we hold our own ldt data in UML.
200 * Thus, the code implementing sys_modify_ldt_skas
201 * is very similar to (and mostly stolen from) sys_modify_ldt
202 * for arch/i386/kernel/ldt.c
203 * The routines copied and modified in part are:
204 * - read_ldt
205 * - read_default_ldt
206 * - write_ldt
207 * - sys_modify_ldt_skas
208 */
209
210static int read_ldt(void __user * ptr, unsigned long bytecount)
211{
212 int i, err = 0;
213 unsigned long size;
214 uml_ldt_t * ldt = &current->mm->context.skas.ldt;
215
216 if(!ldt->entry_count)
217 goto out;
218 if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
219 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
220 err = bytecount;
221
222 if(ptrace_ldt){
223 return read_ldt_from_host(ptr, bytecount);
224 }
225
226 down(&ldt->semaphore);
227 if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
228 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
229 if(size > bytecount)
230 size = bytecount;
231 if(copy_to_user(ptr, ldt->entries, size))
232 err = -EFAULT;
233 bytecount -= size;
234 ptr += size;
235 }
236 else {
237 for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
238 i++){
239 size = PAGE_SIZE;
240 if(size > bytecount)
241 size = bytecount;
242 if(copy_to_user(ptr, ldt->pages[i], size)){
243 err = -EFAULT;
244 break;
245 }
246 bytecount -= size;
247 ptr += size;
248 }
249 }
250 up(&ldt->semaphore);
251
252 if(bytecount == 0 || err == -EFAULT)
253 goto out;
254
255 if(clear_user(ptr, bytecount))
256 err = -EFAULT;
257
258out:
259 return err;
260}
261
262static int read_default_ldt(void __user * ptr, unsigned long bytecount)
263{
264 int err;
265
266 if(bytecount > 5*LDT_ENTRY_SIZE)
267 bytecount = 5*LDT_ENTRY_SIZE;
268
269 err = bytecount;
270 /* UML doesn't support lcall7 and lcall27.
271 * So, we don't really have a default ldt, but emulate
272 * an empty ldt of common host default ldt size.
273 */
274 if(clear_user(ptr, bytecount))
275 err = -EFAULT;
276
277 return err;
278}
279
280static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
281{
282 uml_ldt_t * ldt = &current->mm->context.skas.ldt;
283 struct mm_id * mm_idp = &current->mm->context.skas.id;
284 int i, err;
285 struct user_desc ldt_info;
286 struct ldt_entry entry0, *ldt_p;
287 void *addr = NULL;
288
289 err = -EINVAL;
290 if(bytecount != sizeof(ldt_info))
291 goto out;
292 err = -EFAULT;
293 if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
294 goto out;
295
296 err = -EINVAL;
297 if(ldt_info.entry_number >= LDT_ENTRIES)
298 goto out;
299 if(ldt_info.contents == 3){
300 if (func == 1)
301 goto out;
302 if (ldt_info.seg_not_present == 0)
303 goto out;
304 }
305
306 if(!ptrace_ldt)
307 down(&ldt->semaphore);
308
309 err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
310 if(err)
311 goto out_unlock;
312 else if(ptrace_ldt) {
313 /* With PTRACE_LDT available, this is used as a flag only */
314 ldt->entry_count = 1;
315 goto out;
316 }
317
318 if(ldt_info.entry_number >= ldt->entry_count &&
319 ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
320 for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
321 i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
322 i++){
323 if(i == 0)
324 memcpy(&entry0, ldt->entries, sizeof(entry0));
325 ldt->pages[i] = (struct ldt_entry *)
326 __get_free_page(GFP_KERNEL|__GFP_ZERO);
327 if(!ldt->pages[i]){
328 err = -ENOMEM;
329 /* Undo the change in host */
330 memset(&ldt_info, 0, sizeof(ldt_info));
331 write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
332 goto out_unlock;
333 }
334 if(i == 0) {
335 memcpy(ldt->pages[0], &entry0, sizeof(entry0));
336 memcpy(ldt->pages[0]+1, ldt->entries+1,
337 sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
338 }
339 ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
340 }
341 }
342 if(ldt->entry_count <= ldt_info.entry_number)
343 ldt->entry_count = ldt_info.entry_number + 1;
344
345 if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
346 ldt_p = ldt->entries + ldt_info.entry_number;
347 else
348 ldt_p = ldt->pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
349 ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
350
351 if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
352 (func == 1 || LDT_empty(&ldt_info))){
353 ldt_p->a = 0;
354 ldt_p->b = 0;
355 }
356 else{
357 if (func == 1)
358 ldt_info.useable = 0;
359 ldt_p->a = LDT_entry_a(&ldt_info);
360 ldt_p->b = LDT_entry_b(&ldt_info);
361 }
362 err = 0;
363
364out_unlock:
365 up(&ldt->semaphore);
366out:
367 return err;
368}
369
370static long do_modify_ldt_skas(int func, void __user *ptr,
371 unsigned long bytecount)
372{
373 int ret = -ENOSYS;
374
375 switch (func) {
376 case 0:
377 ret = read_ldt(ptr, bytecount);
378 break;
379 case 1:
380 case 0x11:
381 ret = write_ldt(ptr, bytecount, func);
382 break;
383 case 2:
384 ret = read_default_ldt(ptr, bytecount);
385 break;
386 }
387 return ret;
388}
389
390short dummy_list[9] = {0, -1};
391short * host_ldt_entries = NULL;
392
393void ldt_get_host_info(void)
394{
395 long ret;
396 struct ldt_entry * ldt;
397 int i, size, k, order;
398
399 host_ldt_entries = dummy_list+1;
400
401 for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
402
403 ldt = (struct ldt_entry *)
404 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
405 if(ldt == NULL) {
406 printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
407 return;
408 }
409
410 ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
411 if(ret < 0) {
412 printk("ldt_get_host_info: couldn't read host ldt\n");
413 goto out_free;
414 }
415 if(ret == 0) {
416 /* default_ldt is active, simply write an empty entry 0 */
417 host_ldt_entries = dummy_list;
418 goto out_free;
419 }
420
421 for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
422 if(ldt[i].a != 0 || ldt[i].b != 0)
423 size++;
424 }
425
426 if(size < sizeof(dummy_list)/sizeof(dummy_list[0])) {
427 host_ldt_entries = dummy_list;
428 }
429 else {
430 size = (size + 1) * sizeof(dummy_list[0]);
431 host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL);
432 if(host_ldt_entries == NULL) {
433 printk("ldt_get_host_info: couldn't allocate host ldt list\n");
434 goto out_free;
435 }
436 }
437
438 for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
439 if(ldt[i].a != 0 || ldt[i].b != 0) {
440 host_ldt_entries[k++] = i;
441 }
442 }
443 host_ldt_entries[k] = -1;
444
445out_free:
446 free_pages((unsigned long)ldt, order);
447}
448
449long init_new_ldt(struct mmu_context_skas * new_mm,
450 struct mmu_context_skas * from_mm)
451{
452 struct user_desc desc;
453 short * num_p;
454 int i;
455 long page, err=0;
456 void *addr = NULL;
457
458 memset(&desc, 0, sizeof(desc));
459
460 if(!ptrace_ldt)
461 init_MUTEX(&new_mm->ldt.semaphore);
462
463 if(!from_mm){
464 /*
465 * We have to initialize a clean ldt.
466 */
467 if(proc_mm) {
468 /*
469 * If the new mm was created using proc_mm, host's
470 * default-ldt currently is assigned, which normally
471 * contains the call-gates for lcall7 and lcall27.
472 * To remove these gates, we simply write an empty
473 * entry as number 0 to the host.
474 */
475 err = write_ldt_entry(&new_mm->id, 1, &desc,
476 &addr, 1);
477 }
478 else{
479 /*
480 * Now we try to retrieve info about the ldt, we
481 * inherited from the host. All ldt-entries found
482 * will be reset in the following loop
483 */
484 if(host_ldt_entries == NULL)
485 ldt_get_host_info();
486 for(num_p=host_ldt_entries; *num_p != -1; num_p++){
487 desc.entry_number = *num_p;
488 err = write_ldt_entry(&new_mm->id, 1, &desc,
489 &addr, *(num_p + 1) == -1);
490 if(err)
491 break;
492 }
493 }
494 new_mm->ldt.entry_count = 0;
495 }
496 else if (!ptrace_ldt) {
497 /* Our local LDT is used to supply the data for
498 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
499 * i.e., we have to use the stub for modify_ldt, which
500 * can't handle the big read buffer of up to 64kB.
501 */
502 down(&from_mm->ldt.semaphore);
503 if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
504 memcpy(new_mm->ldt.entries, from_mm->ldt.entries,
505 sizeof(new_mm->ldt.entries));
506 }
507 else{
508 i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
509 while(i-->0){
510 page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
511 if (!page){
512 err = -ENOMEM;
513 break;
514 }
515 new_mm->ldt.pages[i] = (struct ldt_entry*)page;
516 memcpy(new_mm->ldt.pages[i],
517 from_mm->ldt.pages[i], PAGE_SIZE);
518 }
519 }
520 new_mm->ldt.entry_count = from_mm->ldt.entry_count;
521 up(&from_mm->ldt.semaphore);
522 }
523
524 return err;
525}
526
527
528void free_ldt(struct mmu_context_skas * mm)
529{
530 int i;
531
532 if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
533 i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
534 while(i-- > 0){
535 free_page((long )mm->ldt.pages[i]);
536 }
537 }
538 mm->ldt.entry_count = 0;
539}
540#endif
541
542int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
543{
544 return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
545 ptr, bytecount));
546}
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 06c3633457a2..ea977df395a1 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -5,7 +5,7 @@
5# 5#
6 6
7#XXX: why into lib-y? 7#XXX: why into lib-y?
8lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o mem.o memcpy.o \ 8lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o ldt.o mem.o memcpy.o \
9 ptrace.o ptrace_user.o sigcontext.o signal.o stub.o \ 9 ptrace.o ptrace_user.o sigcontext.o signal.o stub.o \
10 stub_segv.o syscalls.o syscall_table.o sysrq.o thunk.o 10 stub_segv.o syscalls.o syscall_table.o sysrq.o thunk.o
11 11
@@ -14,7 +14,7 @@ obj-$(CONFIG_MODULES) += module.o um_module.o
14 14
15USER_OBJS := ptrace_user.o sigcontext.o 15USER_OBJS := ptrace_user.o sigcontext.o
16 16
17SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \ 17SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c ldt.c memcpy.S \
18 thunk.S module.c 18 thunk.S module.c
19 19
20include arch/um/scripts/Makefile.rules 20include arch/um/scripts/Makefile.rules
@@ -23,6 +23,7 @@ bitops.c-dir = lib
23csum-copy.S-dir = lib 23csum-copy.S-dir = lib
24csum-partial.c-dir = lib 24csum-partial.c-dir = lib
25csum-wrappers.c-dir = lib 25csum-wrappers.c-dir = lib
26ldt.c-dir = /arch/um/sys-i386
26memcpy.S-dir = lib 27memcpy.S-dir = lib
27thunk.S-dir = lib 28thunk.S-dir = lib
28module.c-dir = kernel 29module.c-dir = kernel
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 3259a4db4534..6acee5c4ada6 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -29,81 +29,6 @@ asmlinkage long sys_uname64(struct new_utsname __user * name)
29} 29}
30 30
31#ifdef CONFIG_MODE_TT 31#ifdef CONFIG_MODE_TT
32extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
33
34long sys_modify_ldt_tt(int func, void *ptr, unsigned long bytecount)
35{
36 /* XXX This should check VERIFY_WRITE depending on func, check this
37 * in i386 as well.
38 */
39 if (!access_ok(VERIFY_READ, ptr, bytecount))
40 return -EFAULT;
41 return(modify_ldt(func, ptr, bytecount));
42}
43#endif
44
45#ifdef CONFIG_MODE_SKAS
46extern int userspace_pid[];
47
48#include "skas_ptrace.h"
49
50long sys_modify_ldt_skas(int func, void *ptr, unsigned long bytecount)
51{
52 struct ptrace_ldt ldt;
53 void *buf;
54 int res, n;
55
56 buf = kmalloc(bytecount, GFP_KERNEL);
57 if(buf == NULL)
58 return(-ENOMEM);
59
60 res = 0;
61
62 switch(func){
63 case 1:
64 case 0x11:
65 res = copy_from_user(buf, ptr, bytecount);
66 break;
67 }
68
69 if(res != 0){
70 res = -EFAULT;
71 goto out;
72 }
73
74 ldt = ((struct ptrace_ldt) { .func = func,
75 .ptr = buf,
76 .bytecount = bytecount });
77#warning Need to look up userspace_pid by cpu
78 res = ptrace(PTRACE_LDT, userspace_pid[0], 0, (unsigned long) &ldt);
79 if(res < 0)
80 goto out;
81
82 switch(func){
83 case 0:
84 case 2:
85 n = res;
86 res = copy_to_user(ptr, buf, n);
87 if(res != 0)
88 res = -EFAULT;
89 else
90 res = n;
91 break;
92 }
93
94 out:
95 kfree(buf);
96 return(res);
97}
98#endif
99
100long sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
101{
102 return(CHOOSE_MODE_PROC(sys_modify_ldt_tt, sys_modify_ldt_skas, func,
103 ptr, bytecount));
104}
105
106#ifdef CONFIG_MODE_TT
107extern long arch_prctl(int code, unsigned long addr); 32extern long arch_prctl(int code, unsigned long addr);
108 33
109static long arch_prctl_tt(int code, unsigned long addr) 34static long arch_prctl_tt(int code, unsigned long addr)
diff --git a/include/asm-um/ldt-i386.h b/include/asm-um/ldt-i386.h
new file mode 100644
index 000000000000..b42662929b6c
--- /dev/null
+++ b/include/asm-um/ldt-i386.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7
8#ifndef __ASM_LDT_I386_H
9#define __ASM_LDT_I386_H
10
11#include "asm/semaphore.h"
12#include "asm/arch/ldt.h"
13
14struct mmu_context_skas;
15extern void ldt_host_info(void);
16extern long init_new_ldt(struct mmu_context_skas * to_mm,
17 struct mmu_context_skas * from_mm);
18extern void free_ldt(struct mmu_context_skas * mm);
19
20#define LDT_PAGES_MAX \
21 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
22#define LDT_ENTRIES_PER_PAGE \
23 (PAGE_SIZE/LDT_ENTRY_SIZE)
24#define LDT_DIRECT_ENTRIES \
25 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
26
27struct ldt_entry {
28 __u32 a;
29 __u32 b;
30};
31
32typedef struct uml_ldt {
33 int entry_count;
34 struct semaphore semaphore;
35 union {
36 struct ldt_entry * pages[LDT_PAGES_MAX];
37 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
38 };
39} uml_ldt_t;
40
41/*
42 * macros stolen from include/asm-i386/desc.h
43 */
44#define LDT_entry_a(info) \
45 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
46
47#define LDT_entry_b(info) \
48 (((info)->base_addr & 0xff000000) | \
49 (((info)->base_addr & 0x00ff0000) >> 16) | \
50 ((info)->limit & 0xf0000) | \
51 (((info)->read_exec_only ^ 1) << 9) | \
52 ((info)->contents << 10) | \
53 (((info)->seg_not_present ^ 1) << 15) | \
54 ((info)->seg_32bit << 22) | \
55 ((info)->limit_in_pages << 23) | \
56 ((info)->useable << 20) | \
57 0x7000)
58
59#define LDT_empty(info) (\
60 (info)->base_addr == 0 && \
61 (info)->limit == 0 && \
62 (info)->contents == 0 && \
63 (info)->read_exec_only == 1 && \
64 (info)->seg_32bit == 0 && \
65 (info)->limit_in_pages == 0 && \
66 (info)->seg_not_present == 1 && \
67 (info)->useable == 0 )
68
69#endif
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
index e908439d338a..4466ff6de0fd 100644
--- a/include/asm-um/ldt.h
+++ b/include/asm-um/ldt.h
@@ -1,3 +1,72 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7
8#ifndef __ASM_LDT_I386_H
9#define __ASM_LDT_I386_H
10
11#include "asm/semaphore.h"
12#include "asm/arch/ldt.h"
13
14struct mmu_context_skas;
15extern void ldt_host_info(void);
16extern long init_new_ldt(struct mmu_context_skas * to_mm,
17 struct mmu_context_skas * from_mm);
18extern void free_ldt(struct mmu_context_skas * mm);
19
20#define LDT_PAGES_MAX \
21 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
22#define LDT_ENTRIES_PER_PAGE \
23 (PAGE_SIZE/LDT_ENTRY_SIZE)
24#define LDT_DIRECT_ENTRIES \
25 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
26
27struct ldt_entry {
28 __u32 a;
29 __u32 b;
30};
31
32typedef struct uml_ldt {
33 int entry_count;
34 struct semaphore semaphore;
35 union {
36 struct ldt_entry * pages[LDT_PAGES_MAX];
37 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
38 };
39} uml_ldt_t;
40
41/*
42 * macros stolen from include/asm-i386/desc.h
43 */
44#define LDT_entry_a(info) \
45 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
46
47#define LDT_entry_b(info) \
48 (((info)->base_addr & 0xff000000) | \
49 (((info)->base_addr & 0x00ff0000) >> 16) | \
50 ((info)->limit & 0xf0000) | \
51 (((info)->read_exec_only ^ 1) << 9) | \
52 ((info)->contents << 10) | \
53 (((info)->seg_not_present ^ 1) << 15) | \
54 ((info)->seg_32bit << 22) | \
55 ((info)->limit_in_pages << 23) | \
56 ((info)->useable << 20) | \
57 0x7000)
58
59#define LDT_empty(info) (\
60 (info)->base_addr == 0 && \
61 (info)->limit == 0 && \
62 (info)->contents == 0 && \
63 (info)->read_exec_only == 1 && \
64 (info)->seg_32bit == 0 && \
65 (info)->limit_in_pages == 0 && \
66 (info)->seg_not_present == 1 && \
67 (info)->useable == 0 )
68
69#endif
1#ifndef __UM_LDT_H 70#ifndef __UM_LDT_H
2#define __UM_LDT_H 71#define __UM_LDT_H
3 72