author	Bodo Stroesser <bstroesser@fujitsu-siemens.com>	2005-09-03 18:57:49 -0400
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 03:06:24 -0400
commit	8b51304ed3184826fb262c1e9d3e58b0b00fd083 (patch)
tree	2fd338bf425794146ba4d8b1a2fb3a81fb8c3fa4 /arch/um
parent	60d339f6fe0831060600c62418b71a62ad26c281 (diff)
[PATCH] uml: increase granularity of host capability checking
This change enables SKAS0/SKAS3 to work with all combinations of /proc/mm
and PTRACE_FAULTINFO being available or not.

Also, it changes the initialization of proc_mm and ptrace_faultinfo slightly,
to ease forcing SKAS0 on a patched host. Forcing UML to run without /proc/mm
or PTRACE_FAULTINFO by command-line parameter can be implemented with a setup
that resets the related variable.

Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
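[Editor's note] As a rough illustration of the "setup resetting the related variable" mentioned above (not part of this patch): proc_mm and ptrace_faultinfo are the existing UML host-capability flags this patch checks, and __uml_setup() is UML's command-line option macro; the option name "skas0" and the handler name below are made up for the example.

/* Hypothetical sketch only -- not included in this commit. */
static int __init force_skas0_cmd_param(char *str, int *add)
{
	/* Pretend the host lacks both capabilities, forcing SKAS0 mode. */
	proc_mm = 0;
	ptrace_faultinfo = 0;
	return 0;
}

__uml_setup("skas0", force_skas0_cmd_param,
"skas0\n"
"    Disables SKAS3 usage, so that SKAS0 is used.\n\n");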
Diffstat (limited to 'arch/um')
-rw-r--r--	arch/um/kernel/skas/include/skas.h	2
-rw-r--r--	arch/um/kernel/skas/mmu.c	52
-rw-r--r--	arch/um/kernel/skas/process.c	69
-rw-r--r--	arch/um/kernel/skas/process_kern.c	7
4 files changed, 96 insertions(+), 34 deletions(-)
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index e91064b7e5a0..03c9d99e15e4 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -33,7 +33,7 @@ extern void *protect(struct mm_id * mm_idp, unsigned long addr,
 		     unsigned long len, int r, int w, int x, int done,
 		     void *data);
 extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
-extern int new_mm(int from);
+extern int new_mm(int from, unsigned long stack);
 extern int start_userspace(unsigned long stub_stack);
 extern int copy_context_skas0(unsigned long stack, int pid);
 extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d232daa42c31..d837223e22af 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -77,23 +77,14 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 	struct mm_struct *cur_mm = current->mm;
 	struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
 	struct mm_id *mm_id = &mm->context.skas.id;
-	unsigned long stack;
-	int from, ret;
+	unsigned long stack = 0;
+	int from, ret = -ENOMEM;
 
-	if(proc_mm){
-		if((cur_mm != NULL) && (cur_mm != &init_mm))
-			from = cur_mm->context.skas.id.u.mm_fd;
-		else from = -1;
+	if(!proc_mm || !ptrace_faultinfo){
+		stack = get_zeroed_page(GFP_KERNEL);
+		if(stack == 0)
+			goto out;
 
-		ret = new_mm(from);
-		if(ret < 0){
-			printk("init_new_context_skas - new_mm failed, "
-			       "errno = %d\n", ret);
-			return ret;
-		}
-		mm_id->u.mm_fd = ret;
-	}
-	else {
 		/* This zeros the entry that pgd_alloc didn't, needed since
 		 * we are about to reinitialize it, and want mm.nr_ptes to
 		 * be accurate.
@@ -103,20 +94,30 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
 				    (unsigned long) &__syscall_stub_start);
 		if(ret)
-			goto out;
-
-		ret = -ENOMEM;
-		stack = get_zeroed_page(GFP_KERNEL);
-		if(stack == 0)
-			goto out;
-		mm_id->stack = stack;
+			goto out_free;
 
 		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
 		if(ret)
 			goto out_free;
 
 		mm->nr_ptes--;
+	}
+	mm_id->stack = stack;
 
+	if(proc_mm){
+		if((cur_mm != NULL) && (cur_mm != &init_mm))
+			from = cur_mm_id->u.mm_fd;
+		else from = -1;
+
+		ret = new_mm(from, stack);
+		if(ret < 0){
+			printk("init_new_context_skas - new_mm failed, "
+			       "errno = %d\n", ret);
+			goto out_free;
+		}
+		mm_id->u.mm_fd = ret;
+	}
+	else {
 		if((cur_mm != NULL) && (cur_mm != &init_mm))
 			mm_id->u.pid = copy_context_skas0(stack,
 							  cur_mm_id->u.pid);
@@ -126,7 +127,8 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 	return 0;
 
  out_free:
-	free_page(mm_id->stack);
+	if(mm_id->stack != 0)
+		free_page(mm_id->stack);
  out:
 	return ret;
 }
@@ -137,8 +139,10 @@ void destroy_context_skas(struct mm_struct *mm)
 
 	if(proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
-	else {
+	else
 		os_kill_ptraced_process(mmu->id.u.pid, 1);
+
+	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
 		free_page(mmu->last_page_table);
 	}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index f228f8b54194..5cd0e9929789 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -138,6 +138,8 @@ static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu
 }
 
 extern int __syscall_stub_start;
+int stub_code_fd = -1;
+__u64 stub_code_offset;
 
 static int userspace_tramp(void *stack)
 {
@@ -152,31 +154,31 @@ static int userspace_tramp(void *stack)
 		/* This has a pte, but it can't be mapped in with the usual
 		 * tlb_flush mechanism because this is part of that mechanism
 		 */
-		int fd;
-		__u64 offset;
-
-		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
 		addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
-			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
+			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE,
+			      stub_code_fd, stub_code_offset);
 		if(addr == MAP_FAILED){
-			printk("mapping mmap stub failed, errno = %d\n",
+			printk("mapping stub code failed, errno = %d\n",
 			       errno);
 			exit(1);
 		}
 
 		if(stack != NULL){
+			int fd;
+			__u64 offset;
+
 			fd = phys_mapping(to_phys(stack), &offset);
 			addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
 				    PROT_READ | PROT_WRITE,
 				    MAP_FIXED | MAP_SHARED, fd, offset);
 			if(addr == MAP_FAILED){
-				printk("mapping segfault stack failed, "
+				printk("mapping stub stack failed, "
 				       "errno = %d\n", errno);
 				exit(1);
 			}
 		}
 	}
-	if(!ptrace_faultinfo && (stack != NULL)){
+	if(!ptrace_faultinfo){
 		unsigned long v = UML_CONFIG_STUB_CODE +
 			(unsigned long) stub_segv_handler -
 			(unsigned long) &__syscall_stub_start;
@@ -202,6 +204,10 @@ int start_userspace(unsigned long stub_stack)
 	unsigned long sp;
 	int pid, status, n, flags;
 
+	if ( stub_code_fd == -1 )
+		stub_code_fd = phys_mapping(to_phys(&__syscall_stub_start),
+					    &stub_code_offset);
+
 	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
 		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	if(stack == MAP_FAILED)
@@ -363,6 +369,53 @@ int copy_context_skas0(unsigned long new_stack, int pid)
 	return pid;
 }
 
+/*
+ * This is used only, if proc_mm is available, while PTRACE_FAULTINFO
+ * isn't. Opening /proc/mm creates a new mm_context, which lacks the stub-pages.
+ * Thus, we map them using the /proc/mm-fd
+ */
+void map_stub_pages(int fd, unsigned long code,
+		    unsigned long data, unsigned long stack)
+{
+	struct proc_mm_op mmop;
+	int n;
+
+	mmop = ((struct proc_mm_op) { .op       = MM_MMAP,
+				      .u        =
+				      { .mmap   =
+					{ .addr   = code,
+					  .len    = PAGE_SIZE,
+					  .prot   = PROT_EXEC,
+					  .flags  = MAP_FIXED | MAP_PRIVATE,
+					  .fd     = stub_code_fd,
+					  .offset = stub_code_offset
+	} } });
+	n = os_write_file(fd, &mmop, sizeof(mmop));
+	if(n != sizeof(mmop))
+		panic("map_stub_pages : /proc/mm map for code failed, "
+		      "err = %d\n", -n);
+
+	if ( stack ) {
+		__u64 map_offset;
+		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
+		mmop = ((struct proc_mm_op)
+				{ .op       = MM_MMAP,
+				  .u        =
+				  { .mmap   =
+				    { .addr   = data,
+				      .len    = PAGE_SIZE,
+				      .prot   = PROT_READ | PROT_WRITE,
+				      .flags  = MAP_FIXED | MAP_SHARED,
+				      .fd     = map_fd,
+				      .offset = map_offset
+		} } });
+		n = os_write_file(fd, &mmop, sizeof(mmop));
+		if(n != sizeof(mmop))
+			panic("map_stub_pages : /proc/mm map for data failed, "
+			      "err = %d\n", -n);
+	}
+}
+
 void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
 		void (*handler)(int))
 {
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index cbabab104ac3..3d1b227226e6 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -129,7 +129,9 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
 	return(0);
 }
 
-int new_mm(int from)
+extern void map_stub_pages(int fd, unsigned long code,
+			   unsigned long data, unsigned long stack);
+int new_mm(int from, unsigned long stack)
 {
 	struct proc_mm_op copy;
 	int n, fd;
@@ -148,6 +150,9 @@ int new_mm(int from)
 		      "err = %d\n", -n);
 	}
 
+	if(!ptrace_faultinfo)
+		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
+
 	return(fd);
 }
 