Diffstat (limited to 'arch/um')
-rw-r--r--  arch/um/include/os.h               |   6
-rw-r--r--  arch/um/include/tlb.h              |   8
-rw-r--r--  arch/um/kernel/physmem.c           | 228
-rw-r--r--  arch/um/kernel/skas/tlb.c          |  21
-rw-r--r--  arch/um/kernel/tlb.c               |  42
-rw-r--r--  arch/um/os-Linux/skas/mem.c        |  51
-rw-r--r--  arch/um/os-Linux/skas/process.c    | 122
-rw-r--r--  arch/um/os-Linux/skas/trap.c       |  17
-rw-r--r--  arch/um/sys-i386/user-offsets.c    |   9
-rw-r--r--  arch/um/sys-x86_64/user-offsets.c  |   5
10 files changed, 145 insertions, 364 deletions
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index e11bdcd8afc2..688d181b5f8a 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -300,13 +300,12 @@ extern long syscall_stub_data(struct mm_id * mm_idp,
 			      unsigned long *data, int data_count,
 			      void **addr, void **stub_addr);
 extern int map(struct mm_id * mm_idp, unsigned long virt,
-	       unsigned long len, int r, int w, int x, int phys_fd,
+	       unsigned long len, int prot, int phys_fd,
 	       unsigned long long offset, int done, void **data);
 extern int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
 		 int done, void **data);
 extern int protect(struct mm_id * mm_idp, unsigned long addr,
-		   unsigned long len, int r, int w, int x, int done,
-		   void **data);
+		   unsigned long len, unsigned int prot, int done, void **data);
 
 /* skas/process.c */
 extern int is_skas_winch(int pid, int fd, void *data);
@@ -342,7 +341,6 @@ extern void maybe_sigio_broken(int fd, int read);
 
 /* skas/trap */
 extern void sig_handler_common_skas(int sig, void *sc_ptr);
-extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
 
 /* sys-x86_64/prctl.c */
 extern int os_arch_prctl(int pid, int code, unsigned long *addr);
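Note: the map()/protect() prototype change above collapses the separate r/w/x
ints into one prot bitmask built from UM_PROT_READ, UM_PROT_WRITE and
UM_PROT_EXEC, which this patch generates in the user-offsets.c files as
aliases of the host PROT_* values. A minimal, self-contained sketch of that
encoding follows; the UM_PROT_* defines here are local stand-ins for the
generated constants, not code from the patch.

    #include <stdio.h>
    #include <sys/mman.h>

    /* Stand-ins for the generated UM_PROT_* constants; the patch defines
     * them to equal the host PROT_* values so the resulting bitmask can be
     * handed straight to mmap()/mprotect() on the host side. */
    #define UM_PROT_READ  PROT_READ
    #define UM_PROT_WRITE PROT_WRITE
    #define UM_PROT_EXEC  PROT_EXEC

    /* Collapse the old r/w/x flags into the single prot argument that
     * map() and protect() now take. */
    static unsigned int rwx_to_prot(int r, int w, int x)
    {
        return (r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
               (x ? UM_PROT_EXEC : 0);
    }

    int main(void)
    {
        printf("rwx(1,1,0) -> prot 0x%x\n", rwx_to_prot(1, 1, 0));
        return 0;
    }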
diff --git a/arch/um/include/tlb.h b/arch/um/include/tlb.h
index 8efc1e0f1b84..bcd1a4afb842 100644
--- a/arch/um/include/tlb.h
+++ b/arch/um/include/tlb.h
@@ -14,9 +14,7 @@ struct host_vm_op {
 		struct {
 			unsigned long addr;
 			unsigned long len;
-			unsigned int r:1;
-			unsigned int w:1;
-			unsigned int x:1;
+			unsigned int prot;
 			int fd;
 			__u64 offset;
 		} mmap;
@@ -27,9 +25,7 @@ struct host_vm_op {
 		struct {
 			unsigned long addr;
 			unsigned long len;
-			unsigned int r:1;
-			unsigned int w:1;
-			unsigned int x:1;
+			unsigned int prot;
 		} mprotect;
 	} u;
 };
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index df1ad3ba130c..3ba6e4c841da 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -21,229 +21,8 @@
 #include "kern.h"
 #include "init.h"
 
-struct phys_desc {
-	struct rb_node rb;
-	int fd;
-	__u64 offset;
-	void *virt;
-	unsigned long phys;
-	struct list_head list;
-};
-
-static struct rb_root phys_mappings = RB_ROOT;
-
-static struct rb_node **find_rb(void *virt)
-{
-	struct rb_node **n = &phys_mappings.rb_node;
-	struct phys_desc *d;
-
-	while(*n != NULL){
-		d = rb_entry(*n, struct phys_desc, rb);
-		if(d->virt == virt)
-			return n;
-
-		if(d->virt > virt)
-			n = &(*n)->rb_left;
-		else
-			n = &(*n)->rb_right;
-	}
-
-	return n;
-}
-
-static struct phys_desc *find_phys_mapping(void *virt)
-{
-	struct rb_node **n = find_rb(virt);
-
-	if(*n == NULL)
-		return NULL;
-
-	return rb_entry(*n, struct phys_desc, rb);
-}
-
-static void insert_phys_mapping(struct phys_desc *desc)
-{
-	struct rb_node **n = find_rb(desc->virt);
-
-	if(*n != NULL)
-		panic("Physical remapping for %p already present",
-		      desc->virt);
-
-	rb_link_node(&desc->rb, rb_parent(*n), n);
-	rb_insert_color(&desc->rb, &phys_mappings);
-}
-
-LIST_HEAD(descriptor_mappings);
-
-struct desc_mapping {
-	int fd;
-	struct list_head list;
-	struct list_head pages;
-};
-
-static struct desc_mapping *find_mapping(int fd)
-{
-	struct desc_mapping *desc;
-	struct list_head *ele;
-
-	list_for_each(ele, &descriptor_mappings){
-		desc = list_entry(ele, struct desc_mapping, list);
-		if(desc->fd == fd)
-			return desc;
-	}
-
-	return NULL;
-}
-
-static struct desc_mapping *descriptor_mapping(int fd)
-{
-	struct desc_mapping *desc;
-
-	desc = find_mapping(fd);
-	if(desc != NULL)
-		return desc;
-
-	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-	if(desc == NULL)
-		return NULL;
-
-	*desc = ((struct desc_mapping)
-		{ .fd =		fd,
-		  .list =	LIST_HEAD_INIT(desc->list),
-		  .pages =	LIST_HEAD_INIT(desc->pages) });
-	list_add(&desc->list, &descriptor_mappings);
-
-	return desc;
-}
-
-int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
-{
-	struct desc_mapping *fd_maps;
-	struct phys_desc *desc;
-	unsigned long phys;
-	int err;
-
-	fd_maps = descriptor_mapping(fd);
-	if(fd_maps == NULL)
-		return -ENOMEM;
-
-	phys = __pa(virt);
-	desc = find_phys_mapping(virt);
-	if(desc != NULL)
-		panic("Address 0x%p is already substituted\n", virt);
-
-	err = -ENOMEM;
-	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-	if(desc == NULL)
-		goto out;
-
-	*desc = ((struct phys_desc)
-		{ .fd =		fd,
-		  .offset =	offset,
-		  .virt =	virt,
-		  .phys =	__pa(virt),
-		  .list =	LIST_HEAD_INIT(desc->list) });
-	insert_phys_mapping(desc);
-
-	list_add(&desc->list, &fd_maps->pages);
-
-	virt = (void *) ((unsigned long) virt & PAGE_MASK);
-	err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
-	if(!err)
-		goto out;
-
-	rb_erase(&desc->rb, &phys_mappings);
-	kfree(desc);
- out:
-	return err;
-}
-
 static int physmem_fd = -1;
 
-static void remove_mapping(struct phys_desc *desc)
-{
-	void *virt = desc->virt;
-	int err;
-
-	rb_erase(&desc->rb, &phys_mappings);
-	list_del(&desc->list);
-	kfree(desc);
-
-	err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
-	if(err)
-		panic("Failed to unmap block device page from physical memory, "
-		      "errno = %d", -err);
-}
-
-int physmem_remove_mapping(void *virt)
-{
-	struct phys_desc *desc;
-
-	virt = (void *) ((unsigned long) virt & PAGE_MASK);
-	desc = find_phys_mapping(virt);
-	if(desc == NULL)
-		return 0;
-
-	remove_mapping(desc);
-	return 1;
-}
-
-void physmem_forget_descriptor(int fd)
-{
-	struct desc_mapping *desc;
-	struct phys_desc *page;
-	struct list_head *ele, *next;
-	__u64 offset;
-	void *addr;
-	int err;
-
-	desc = find_mapping(fd);
-	if(desc == NULL)
-		return;
-
-	list_for_each_safe(ele, next, &desc->pages){
-		page = list_entry(ele, struct phys_desc, list);
-		offset = page->offset;
-		addr = page->virt;
-		remove_mapping(page);
-		err = os_seek_file(fd, offset);
-		if(err)
-			panic("physmem_forget_descriptor - failed to seek "
-			      "to %lld in fd %d, error = %d\n",
-			      offset, fd, -err);
-		err = os_read_file(fd, addr, PAGE_SIZE);
-		if(err < 0)
-			panic("physmem_forget_descriptor - failed to read "
-			      "from fd %d to 0x%p, error = %d\n",
-			      fd, addr, -err);
-	}
-
-	list_del(&desc->list);
-	kfree(desc);
-}
-
-EXPORT_SYMBOL(physmem_forget_descriptor);
-EXPORT_SYMBOL(physmem_remove_mapping);
-EXPORT_SYMBOL(physmem_subst_mapping);
-
-void arch_free_page(struct page *page, int order)
-{
-	void *virt;
-	int i;
-
-	for(i = 0; i < (1 << order); i++){
-		virt = __va(page_to_phys(page + i));
-		physmem_remove_mapping(virt);
-	}
-}
-
-int is_remapped(void *virt)
-{
-	struct phys_desc *desc = find_phys_mapping(virt);
-
-	return desc != NULL;
-}
-
 /* Changed during early boot */
 unsigned long high_physmem;
 
@@ -350,14 +129,9 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
 
 int phys_mapping(unsigned long phys, __u64 *offset_out)
 {
-	struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
 	int fd = -1;
 
-	if(desc != NULL){
-		fd = desc->fd;
-		*offset_out = desc->offset;
-	}
-	else if(phys < physmem_size){
+	if(phys < physmem_size){
 		fd = physmem_fd;
 		*offset_out = phys;
 	}
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index c43901aa9368..b3d722ddde31 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -27,9 +27,9 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 		switch(op->type){
 		case MMAP:
 			ret = map(&mmu->skas.id, op->u.mmap.addr,
-				  op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
-				  op->u.mmap.x, op->u.mmap.fd,
-				  op->u.mmap.offset, finished, flush);
+				  op->u.mmap.len, op->u.mmap.prot,
+				  op->u.mmap.fd, op->u.mmap.offset, finished,
+				  flush);
 			break;
 		case MUNMAP:
 			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
@@ -37,8 +37,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 			break;
 		case MPROTECT:
 			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
-				      op->u.mprotect.len, op->u.mprotect.r,
-				      op->u.mprotect.w, op->u.mprotect.x,
+				      op->u.mprotect.len, op->u.mprotect.prot,
 				      finished, flush);
 			break;
 		default:
@@ -102,10 +101,10 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
 	pte_t *pte;
 	struct mm_struct *mm = vma->vm_mm;
 	void *flush = NULL;
-	int r, w, x, err = 0;
+	int r, w, x, prot, err = 0;
 	struct mm_id *mm_id;
 
-	pgd = pgd_offset(vma->vm_mm, address);
+	pgd = pgd_offset(mm, address);
 	if(!pgd_present(*pgd))
 		goto kill;
 
@@ -130,19 +129,21 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
 	}
 
 	mm_id = &mm->context.skas.id;
+	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+		(x ? UM_PROT_EXEC : 0));
 	if(pte_newpage(*pte)){
 		if(pte_present(*pte)){
 			unsigned long long offset;
 			int fd;
 
 			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
-			err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
-				  offset, 1, &flush);
+			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+				  1, &flush);
 		}
 		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
 	}
 	else if(pte_newprot(*pte))
-		err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
 	if(err)
 		goto kill;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 4a39d50d2d62..8a8d52851443 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -17,7 +17,7 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-		    int r, int w, int x, struct host_vm_op *ops, int *index,
+		    unsigned int prot, struct host_vm_op *ops, int *index,
 		    int last_filled, union mm_context *mmu, void **flush,
 		    int (*do_ops)(union mm_context *, struct host_vm_op *,
 				  int, int, void **))
@@ -31,8 +31,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		last = &ops[*index];
 		if((last->type == MMAP) &&
 		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
-		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
-		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
+		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
 		   (last->u.mmap.offset + last->u.mmap.len == offset)){
 			last->u.mmap.len += len;
 			return 0;
@@ -48,9 +47,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 				.u = { .mmap = {
 						.addr	= virt,
 						.len	= len,
-						.r	= r,
-						.w	= w,
-						.x	= x,
+						.prot	= prot,
 						.fd	= fd,
 						.offset	= offset }
 				} });
@@ -87,8 +84,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	return ret;
 }
 
-static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-			int x, struct host_vm_op *ops, int *index,
+static int add_mprotect(unsigned long addr, unsigned long len,
+			unsigned int prot, struct host_vm_op *ops, int *index,
 			int last_filled, union mm_context *mmu, void **flush,
 			int (*do_ops)(union mm_context *, struct host_vm_op *,
 				      int, int, void **))
@@ -100,8 +97,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 		last = &ops[*index];
 		if((last->type == MPROTECT) &&
 		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
-		   (last->u.mprotect.x == x)){
+		   (last->u.mprotect.prot == prot)){
 			last->u.mprotect.len += len;
 			return 0;
 		}
@@ -116,9 +112,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 				.u = { .mprotect = {
 						.addr	= addr,
 						.len	= len,
-						.r	= r,
-						.w	= w,
-						.x	= x } } });
+						.prot	= prot } } });
 	return ret;
 }
 
@@ -133,7 +127,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 					       void **))
 {
 	pte_t *pte;
-	int r, w, x, ret = 0;
+	int r, w, x, prot, ret = 0;
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
@@ -146,19 +140,19 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 		} else if (!pte_dirty(*pte)) {
 			w = 0;
 		}
+		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+			(x ? UM_PROT_EXEC : 0));
 		if(force || pte_newpage(*pte)){
 			if(pte_present(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-					       PAGE_SIZE, r, w, x, ops,
-					       op_index, last_op, mmu, flush,
-					       do_ops);
+					       PAGE_SIZE, prot, ops, op_index,
+					       last_op, mmu, flush, do_ops);
 			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
 					      last_op, mmu, flush, do_ops);
 		}
 		else if(pte_newprot(*pte))
-			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-					   op_index, last_op, mmu, flush,
-					   do_ops);
+			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
+					   last_op, mmu, flush, do_ops);
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
 	return ret;
@@ -377,14 +371,6 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 	return(pte_offset_map(pmd, addr));
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-
-	CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
-		    flush_tlb_page_skas(vma, address));
-}
-
 void flush_tlb_all(void)
 {
 	flush_tlb_mm(current->mm);
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index af0790719b77..8e490fff3d47 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -24,10 +24,11 @@
 #include "uml-config.h"
 #include "sysdep/ptrace.h"
 #include "sysdep/stub.h"
+#include "init.h"
 
 extern unsigned long batch_syscall_stub, __syscall_stub_start;
 
-extern void wait_stub_done(int pid, int sig, char * fname);
+extern void wait_stub_done(int pid);
 
 static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
 					      unsigned long *stack)
@@ -39,6 +40,19 @@ static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
 	return stack;
 }
 
+static unsigned long syscall_regs[MAX_REG_NR];
+
+static int __init init_syscall_regs(void)
+{
+	get_safe_registers(syscall_regs, NULL);
+	syscall_regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+		((unsigned long) &batch_syscall_stub -
+		 (unsigned long) &__syscall_stub_start);
+	return 0;
+}
+
+__initcall(init_syscall_regs);
+
 extern int proc_mm;
 
 int single_count = 0;
@@ -47,12 +61,11 @@ int multi_op_count = 0;
 
 static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
 {
-	unsigned long regs[MAX_REG_NR];
 	int n, i;
 	long ret, offset;
 	unsigned long * data;
 	unsigned long * syscall;
-	int pid = mm_idp->u.pid;
+	int err, pid = mm_idp->u.pid;
 
 	if(proc_mm)
 #warning Need to look up userspace_pid by cpu
@@ -60,21 +73,21 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
 
 	multi_count++;
 
-	get_safe_registers(regs, NULL);
-	regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
-		((unsigned long) &batch_syscall_stub -
-		 (unsigned long) &__syscall_stub_start);
-
-	n = ptrace_setregs(pid, regs);
+	n = ptrace_setregs(pid, syscall_regs);
 	if(n < 0){
 		printk("Registers - \n");
 		for(i = 0; i < MAX_REG_NR; i++)
-			printk("\t%d\t0x%lx\n", i, regs[i]);
+			printk("\t%d\t0x%lx\n", i, syscall_regs[i]);
 		panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n",
 		      -n);
 	}
 
-	wait_stub_done(pid, 0, "do_syscall_stub");
+	err = ptrace(PTRACE_CONT, pid, 0, 0);
+	if(err)
+		panic("Failed to continue stub, pid = %d, errno = %d\n", pid,
+		      errno);
+
+	wait_stub_done(pid);
 
 	/* When the stub stops, we find the following values on the
 	 * beginning of the stack:
@@ -176,14 +189,10 @@ long syscall_stub_data(struct mm_id * mm_idp,
 	return 0;
 }
 
-int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
-	int r, int w, int x, int phys_fd, unsigned long long offset,
-	int done, void **data)
+int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, int prot,
+	int phys_fd, unsigned long long offset, int done, void **data)
 {
-	int prot, ret;
-
-	prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
-		(x ? PROT_EXEC : 0);
+	int ret;
 
 	if(proc_mm){
 		struct proc_mm_op map;
@@ -253,13 +262,11 @@ int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
 }
 
 int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
-	    int r, int w, int x, int done, void **data)
+	    unsigned int prot, int done, void **data)
 {
 	struct proc_mm_op protect;
-	int prot, ret;
+	int ret;
 
-	prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
-		(x ? PROT_EXEC : 0);
 	if(proc_mm){
 		int fd = mm_idp->u.mm_fd;
 
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 1f39f2bf7ce9..5c088a55396c 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -34,6 +34,7 @@
 #include "process.h"
 #include "longjmp.h"
 #include "kern_constants.h"
+#include "as-layout.h"
 
 int is_skas_winch(int pid, int fd, void *data)
 {
@@ -60,37 +61,42 @@ static int ptrace_dump_regs(int pid)
 	return 0;
 }
 
-void wait_stub_done(int pid, int sig, char * fname)
+/*
+ * Signals that are OK to receive in the stub - we'll just continue it.
+ * SIGWINCH will happen when UML is inside a detached screen.
+ */
+#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))
+
+/* Signals that the stub will finish with - anything else is an error */
+#define STUB_DONE_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))
+
+void wait_stub_done(int pid)
 {
 	int n, status, err;
 
-	do {
-		if ( sig != -1 ) {
-			err = ptrace(PTRACE_CONT, pid, 0, sig);
-			if(err)
-				panic("%s : continue failed, errno = %d\n",
-				      fname, errno);
-		}
-		sig = 0;
-
+	while(1){
 		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
-	} while((n >= 0) && WIFSTOPPED(status) &&
-		((WSTOPSIG(status) == SIGVTALRM) ||
-		 /* running UML inside a detached screen can cause
-		  * SIGWINCHes
-		  */
-		 (WSTOPSIG(status) == SIGWINCH)));
-
-	if((n < 0) || !WIFSTOPPED(status) ||
-	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
-		err = ptrace_dump_regs(pid);
+		if((n < 0) || !WIFSTOPPED(status))
+			goto bad_wait;
+
+		if(((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
+			break;
+
+		err = ptrace(PTRACE_CONT, pid, 0, 0);
 		if(err)
-			printk("Failed to get registers from stub, "
-			       "errno = %d\n", -err);
-		panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
-		      "pid = %d, n = %d, errno = %d, status = 0x%x\n",
-		      fname, pid, n, errno, status);
+			panic("wait_stub_done : continue failed, errno = %d\n",
+			      errno);
 	}
+
+	if(((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
+		return;
+
+bad_wait:
+	err = ptrace_dump_regs(pid);
+	if(err)
+		printk("Failed to get registers from stub, errno = %d\n", -err);
+	panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, "
+	      "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status);
 }
 
 extern unsigned long current_stub_stack(void);
@@ -112,7 +118,11 @@ void get_skas_faultinfo(int pid, struct faultinfo * fi)
 		       sizeof(struct ptrace_faultinfo));
 	}
 	else {
-		wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");
+		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
+		if(err)
+			panic("Failed to continue stub, pid = %d, errno = %d\n",
+			      pid, errno);
+		wait_stub_done(pid);
 
 		/* faultinfo is prepared by the stub-segv-handler at start of
 		 * the stub stack page. We just have to copy it.
@@ -304,10 +314,13 @@ void userspace(union uml_pt_regs *regs)
 		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
 
 		if(WIFSTOPPED(status)){
-			switch(WSTOPSIG(status)){
+			int sig = WSTOPSIG(status);
+			switch(sig){
 			case SIGSEGV:
-				if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
-					user_signal(SIGSEGV, regs, pid);
+				if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo){
+					get_skas_faultinfo(pid, &regs->skas.faultinfo);
+					(*sig_info[SIGSEGV])(SIGSEGV, regs);
+				}
 				else handle_segv(pid, regs);
 				break;
 			case SIGTRAP + 0x80:
@@ -322,11 +335,13 @@ void userspace(union uml_pt_regs *regs)
 			case SIGBUS:
 			case SIGFPE:
 			case SIGWINCH:
-				user_signal(WSTOPSIG(status), regs, pid);
+				block_signals();
+				(*sig_info[sig])(sig, regs);
+				unblock_signals();
 				break;
 			default:
 				printk("userspace - child stopped with signal "
-				       "%d\n", WSTOPSIG(status));
+				       "%d\n", sig);
 			}
 			pid = userspace_pid[0];
 			interrupt_end();
@@ -338,11 +353,29 @@ void userspace(union uml_pt_regs *regs)
 	}
 }
 
+static unsigned long thread_regs[MAX_REG_NR];
+static unsigned long thread_fp_regs[HOST_FP_SIZE];
+
+static int __init init_thread_regs(void)
+{
+	get_safe_registers(thread_regs, thread_fp_regs);
+	/* Set parent's instruction pointer to start of clone-stub */
+	thread_regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+				(unsigned long) stub_clone_handler -
+				(unsigned long) &__syscall_stub_start;
+	thread_regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
+		sizeof(void *);
+#ifdef __SIGNAL_FRAMESIZE
+	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
+#endif
+	return 0;
+}
+
+__initcall(init_thread_regs);
+
 int copy_context_skas0(unsigned long new_stack, int pid)
 {
 	int err;
-	unsigned long regs[MAX_REG_NR];
-	unsigned long fp_regs[HOST_FP_SIZE];
 	unsigned long current_stack = current_stub_stack();
 	struct stub_data *data = (struct stub_data *) current_stack;
 	struct stub_data *child_data = (struct stub_data *) new_stack;
@@ -357,23 +390,12 @@ int copy_context_skas0(unsigned long new_stack, int pid)
 			      .timer    = ((struct itimerval)
 					   { { 0, 1000000 / hz() },
 					     { 0, 1000000 / hz() }})});
-	get_safe_registers(regs, fp_regs);
-
-	/* Set parent's instruction pointer to start of clone-stub */
-	regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
-				(unsigned long) stub_clone_handler -
-				(unsigned long) &__syscall_stub_start;
-	regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
-		sizeof(void *);
-#ifdef __SIGNAL_FRAMESIZE
-	regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
-#endif
-	err = ptrace_setregs(pid, regs);
+	err = ptrace_setregs(pid, thread_regs);
 	if(err < 0)
 		panic("copy_context_skas0 : PTRACE_SETREGS failed, "
 		      "pid = %d, errno = %d\n", pid, -err);
 
-	err = ptrace_setfpregs(pid, fp_regs);
+	err = ptrace_setfpregs(pid, thread_fp_regs);
 	if(err < 0)
 		panic("copy_context_skas0 : PTRACE_SETFPREGS failed, "
 		      "pid = %d, errno = %d\n", pid, -err);
@@ -384,7 +406,11 @@ int copy_context_skas0(unsigned long new_stack, int pid)
 	/* Wait, until parent has finished its work: read child's pid from
 	 * parent's stack, and check, if bad result.
 	 */
-	wait_stub_done(pid, 0, "copy_context_skas0");
+	err = ptrace(PTRACE_CONT, pid, 0, 0);
+	if(err)
+		panic("Failed to continue new process, pid = %d, "
+		      "errno = %d\n", pid, errno);
+	wait_stub_done(pid);
 
 	pid = data->err;
 	if(pid < 0)
@@ -394,7 +420,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)
 	/* Wait, until child has finished too: read child's result from
 	 * child's stack and check it.
 	 */
-	wait_stub_done(pid, -1, "copy_context_skas0");
+	wait_stub_done(pid);
 	if (child_data->err != UML_CONFIG_STUB_DATA)
 		panic("copy_context_skas0 - stub-child reports error %ld\n",
 		      child_data->err);
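Note: with the wait_stub_done() rework above, callers that need to kick the
stub issue the PTRACE_CONT themselves (forwarding a signal where required, as
get_skas_faultinfo() does with SIGSEGV), and wait_stub_done() only waits for
the stub to stop and checks the stop signal against STUB_SIG_MASK /
STUB_DONE_MASK. A sketch of the resulting caller pattern follows; the helper
name continue_stub_and_wait() is hypothetical, and panic()/wait_stub_done()
are assumed to be the surrounding UML helpers.

    #include <errno.h>
    #include <sys/ptrace.h>

    /* Assumed to be provided by the surrounding UML userspace code. */
    extern void panic(const char *fmt, ...);
    extern void wait_stub_done(int pid);

    /* Hypothetical wrapper, not part of the patch: it spells out the
     * continue-then-wait sequence that do_syscall_stub(),
     * get_skas_faultinfo() and copy_context_skas0() now open-code. */
    static void continue_stub_and_wait(int pid, int sig)
    {
        if (ptrace(PTRACE_CONT, pid, 0, sig))
            panic("Failed to continue stub, pid = %d, errno = %d\n",
                  pid, errno);
        wait_stub_done(pid);
    }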
diff --git a/arch/um/os-Linux/skas/trap.c b/arch/um/os-Linux/skas/trap.c
index 5110eff51b90..3b600c2e63b8 100644
--- a/arch/um/os-Linux/skas/trap.c
+++ b/arch/um/os-Linux/skas/trap.c
@@ -64,20 +64,3 @@ void sig_handler_common_skas(int sig, void *sc_ptr)
 	errno = save_errno;
 	r->skas.is_user = save_user;
 }
-
-extern int ptrace_faultinfo;
-
-void user_signal(int sig, union uml_pt_regs *regs, int pid)
-{
-	void (*handler)(int, union uml_pt_regs *);
-	int segv = ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) ||
-		    (sig == SIGILL) || (sig == SIGTRAP));
-
-	if (segv)
-		get_skas_faultinfo(pid, &regs->skas.faultinfo);
-
-	handler = sig_info[sig];
-	handler(sig, (union uml_pt_regs *) regs);
-
-	unblock_signals();
-}
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index ee42c27abd3a..29118cf5ff25 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -1,9 +1,10 @@
 #include <stdio.h>
+#include <stddef.h>
 #include <signal.h>
+#include <sys/poll.h>
+#include <sys/mman.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <stddef.h>
-#include <sys/poll.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -72,4 +73,8 @@ void foo(void)
 	DEFINE(UM_POLLIN, POLLIN);
 	DEFINE(UM_POLLPRI, POLLPRI);
 	DEFINE(UM_POLLOUT, POLLOUT);
+
+	DEFINE(UM_PROT_READ, PROT_READ);
+	DEFINE(UM_PROT_WRITE, PROT_WRITE);
+	DEFINE(UM_PROT_EXEC, PROT_EXEC);
 }
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
index 7bb532567c47..0d5fd764c21f 100644
--- a/arch/um/sys-x86_64/user-offsets.c
+++ b/arch/um/sys-x86_64/user-offsets.c
@@ -2,6 +2,7 @@
 #include <stddef.h>
 #include <signal.h>
 #include <sys/poll.h>
+#include <sys/mman.h>
 #define __FRAME_OFFSETS
 #include <asm/ptrace.h>
 #include <asm/types.h>
@@ -93,4 +94,8 @@ void foo(void)
 	DEFINE(UM_POLLIN, POLLIN);
 	DEFINE(UM_POLLPRI, POLLPRI);
 	DEFINE(UM_POLLOUT, POLLOUT);
+
+	DEFINE(UM_PROT_READ, PROT_READ);
+	DEFINE(UM_PROT_WRITE, PROT_WRITE);
+	DEFINE(UM_PROT_EXEC, PROT_EXEC);
 }