-rw-r--r--  arch/um/include/skas/mmu-skas.h | 23
-rw-r--r--  arch/um/include/tlb.h           |  2
-rw-r--r--  arch/um/include/um_mmu.h        | 18
-rw-r--r--  arch/um/kernel/exec.c           |  4
-rw-r--r--  arch/um/kernel/reboot.c         |  2
-rw-r--r--  arch/um/kernel/skas/mmu.c       | 12
-rw-r--r--  arch/um/kernel/skas/process.c   |  2
-rw-r--r--  arch/um/kernel/tlb.c            | 43
-rw-r--r--  arch/um/sys-i386/ldt.c          | 17
-rw-r--r--  arch/um/sys-x86_64/syscalls.c   |  2
-rw-r--r--  include/asm-um/ldt.h            |  4
-rw-r--r--  include/asm-um/mmu_context.h    |  4
12 files changed, 58 insertions(+), 75 deletions(-)
diff --git a/arch/um/include/skas/mmu-skas.h b/arch/um/include/skas/mmu-skas.h
deleted file mode 100644
index 838dfd75e2aa..000000000000
--- a/arch/um/include/skas/mmu-skas.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SKAS_MMU_H
-#define __SKAS_MMU_H
-
-#include "mm_id.h"
-#include "asm/ldt.h"
-
-struct mmu_context_skas {
-	struct mm_id id;
-	unsigned long last_page_table;
-#ifdef CONFIG_3_LEVEL_PGTABLES
-	unsigned long last_pmd;
-#endif
-	uml_ldt_t ldt;
-};
-
-extern void __switch_mm(struct mm_id * mm_idp);
-
-#endif
diff --git a/arch/um/include/tlb.h b/arch/um/include/tlb.h
index bcd1a4afb842..46cf0057a1c5 100644
--- a/arch/um/include/tlb.h
+++ b/arch/um/include/tlb.h
@@ -33,7 +33,7 @@ struct host_vm_op {
 extern void force_flush_all(void);
 extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 			     unsigned long end_addr, int force,
-			     int (*do_ops)(union mm_context *,
+			     int (*do_ops)(struct mm_context *,
 					   struct host_vm_op *, int, int,
 					   void **));
 extern int flush_tlb_kernel_range_common(unsigned long start,
diff --git a/arch/um/include/um_mmu.h b/arch/um/include/um_mmu.h
index 668da8426ef4..8855d8df512f 100644
--- a/arch/um/include/um_mmu.h
+++ b/arch/um/include/um_mmu.h
@@ -7,10 +7,22 @@
 #define __ARCH_UM_MMU_H
 
 #include "uml-config.h"
-#include "mmu-skas.h"
+#include "mm_id.h"
+#include "asm/ldt.h"
 
-typedef union mm_context {
-	struct mmu_context_skas skas;
+typedef struct mm_context {
+	struct mm_id id;
+	unsigned long last_page_table;
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	unsigned long last_pmd;
+#endif
+	struct uml_ldt ldt;
 } mm_context_t;
 
+extern void __switch_mm(struct mm_id * mm_idp);
+
+/* Avoid tangled inclusion with asm/ldt.h */
+extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+extern void free_ldt(struct mm_context *mm);
+
 #endif
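
The um_mmu.h hunk above is the heart of the patch: the single-member union mm_context wrapper is flattened into a plain struct mm_context, so every user drops the intervening ".skas" member, and the init_new_ldt()/free_ldt() declarations move here to break the circular include with asm/ldt.h. A minimal standalone sketch of what this means for call sites follows; the types are simplified stand-ins (the real mm_id keeps its pid inside a union "u", hence the "context.id.u.pid" accesses in the hunks below), not the kernel's actual definitions.

/*
 * Standalone sketch (not kernel code) of the union-to-struct flattening.
 * Stand-in types only; field layout is simplified for illustration.
 */
#include <stdio.h>

struct mm_id { int pid; };

/* Before: the context was a union wrapping a skas-specific struct. */
struct mmu_context_skas { struct mm_id id; };
union old_mm_context { struct mmu_context_skas skas; };

/* After: one flat struct, as typedef'd in um_mmu.h above. */
struct new_mm_context { struct mm_id id; };

int main(void)
{
	union old_mm_context old_ctx = { .skas = { .id = { .pid = 42 } } };
	struct new_mm_context new_ctx = { .id = { .pid = 42 } };

	/* Old call sites: mm->context.skas.id... */
	printf("old: pid=%d\n", old_ctx.skas.id.pid);
	/* New call sites: mm->context.id... */
	printf("new: pid=%d\n", new_ctx.id.pid);
	return 0;
}
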
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 25c502617553..7c77adecd919 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -23,14 +23,14 @@ void flush_thread(void)
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, end, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
 		force_sig(SIGKILL, current);
 	}
 
-	__switch_mm(&current->mm->context.skas.id);
+	__switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 9d8eea47a0fc..04cebcf0679f 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -25,7 +25,7 @@ static void kill_off_processes(void)
 			if(p->mm == NULL)
 				continue;
 
-			pid = p->mm->context.skas.id.u.pid;
+			pid = p->mm->context.id.u.pid;
 			os_kill_ptraced_process(pid, 1);
 		}
 	}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5475ecd9fd4..48c8c136c038 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -47,9 +47,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	 * destroy_context_skas.
 	 */
 
-	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
+	mm->context.last_page_table = pmd_page_vaddr(*pmd);
 #ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
 #endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
@@ -66,8 +66,8 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
-	struct mmu_context_skas *from_mm = NULL;
-	struct mmu_context_skas *to_mm = &mm->context.skas;
+	struct mm_context *from_mm = NULL;
+	struct mm_context *to_mm = &mm->context;
 	unsigned long stack = 0;
 	int ret = -ENOMEM;
 
@@ -97,7 +97,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 	to_mm->id.stack = stack;
 	if (current->mm != NULL && current->mm != &init_mm)
-		from_mm = &current->mm->context.skas;
+		from_mm = &current->mm->context;
 
 	if (proc_mm) {
 		ret = new_mm(stack);
@@ -133,7 +133,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void destroy_context(struct mm_struct *mm)
 {
-	struct mmu_context_skas *mmu = &mm->context.skas;
+	struct mm_context *mmu = &mm->context;
 
 	if (proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 9ce1c49421f8..0297e63f9725 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -65,5 +65,5 @@ unsigned long current_stub_stack(void)
 	if (current->mm == NULL)
 		return 0;
 
-	return current->mm->context.skas.id.stack;
+	return current->mm->context.id.stack;
 }
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 849922fcfb60..081baefb4c0d 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -14,8 +14,8 @@
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		    unsigned int prot, struct host_vm_op *ops, int *index,
-		    int last_filled, union mm_context *mmu, void **flush,
-		    int (*do_ops)(union mm_context *, struct host_vm_op *,
+		    int last_filled, struct mm_context *mmu, void **flush,
+		    int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				  int, int, void **))
 {
 	__u64 offset;
@@ -52,8 +52,8 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 
 static int add_munmap(unsigned long addr, unsigned long len,
 		      struct host_vm_op *ops, int *index, int last_filled,
-		      union mm_context *mmu, void **flush,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      struct mm_context *mmu, void **flush,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	struct host_vm_op *last;
@@ -82,8 +82,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
 
 static int add_mprotect(unsigned long addr, unsigned long len,
 			unsigned int prot, struct host_vm_op *ops, int *index,
-			int last_filled, union mm_context *mmu, void **flush,
-			int (*do_ops)(union mm_context *, struct host_vm_op *,
+			int last_filled, struct mm_context *mmu, void **flush,
+			int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				      int, int, void **))
 {
 	struct host_vm_op *last;
@@ -117,8 +117,8 @@ static int add_mprotect(unsigned long addr, unsigned long len,
 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -157,8 +157,8 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -187,8 +187,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -216,11 +216,11 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	pgd_t *pgd;
-	union mm_context *mmu = &mm->context;
+	struct mm_context *mmu = &mm->context;
 	struct host_vm_op ops[1];
 	unsigned long addr = start_addr, next;
 	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
@@ -375,7 +375,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 		w = 0;
 	}
 
-	mm_id = &mm->context.skas.id;
+	mm_id = &mm->context.id;
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
 	if (pte_newpage(*pte)) {
@@ -453,7 +453,7 @@ void __flush_tlb_one(unsigned long addr)
 	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
-static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
 		  int finished, void **flush)
 {
 	struct host_vm_op *op;
@@ -463,17 +463,16 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 		op = &ops[i];
 		switch(op->type) {
 		case MMAP:
-			ret = map(&mmu->skas.id, op->u.mmap.addr,
-				  op->u.mmap.len, op->u.mmap.prot,
-				  op->u.mmap.fd, op->u.mmap.offset, finished,
-				  flush);
+			ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
+				  op->u.mmap.prot, op->u.mmap.fd,
+				  op->u.mmap.offset, finished, flush);
 			break;
 		case MUNMAP:
-			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
+			ret = unmap(&mmu->id, op->u.munmap.addr,
 				    op->u.munmap.len, finished, flush);
 			break;
 		case MPROTECT:
-			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
+			ret = protect(&mmu->id, op->u.mprotect.addr,
 				      op->u.mprotect.len, op->u.mprotect.prot,
 				      finished, flush);
 			break;
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 0bf7572a80a3..67c0958eb984 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -33,7 +33,7 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
 		 * Note: I'm unsure: should interrupts be disabled here?
 		 */
 		if (!current->active_mm || current->active_mm == &init_mm ||
-		    mm_idp != &current->active_mm->context.skas.id)
+		    mm_idp != &current->active_mm->context.id)
 			__switch_mm(mm_idp);
 	}
 
@@ -79,8 +79,8 @@ long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
 		 * PTRACE_LDT possible to implement.
 		 */
 		if (current->active_mm && current->active_mm != &init_mm &&
-		    mm_idp != &current->active_mm->context.skas.id)
-			__switch_mm(&current->active_mm->context.skas.id);
+		    mm_idp != &current->active_mm->context.id)
+			__switch_mm(&current->active_mm->context.id);
 	}
 
 	return res;
@@ -135,7 +135,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
 {
 	int i, err = 0;
 	unsigned long size;
-	uml_ldt_t * ldt = &current->mm->context.skas.ldt;
+	uml_ldt_t * ldt = &current->mm->context.ldt;
 
 	if (!ldt->entry_count)
 		goto out;
@@ -203,8 +203,8 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount)
 
 static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
 {
-	uml_ldt_t * ldt = &current->mm->context.skas.ldt;
-	struct mm_id * mm_idp = &current->mm->context.skas.id;
+	uml_ldt_t * ldt = &current->mm->context.ldt;
+	struct mm_id * mm_idp = &current->mm->context.id;
 	int i, err;
 	struct user_desc ldt_info;
 	struct ldt_entry entry0, *ldt_p;
@@ -384,8 +384,7 @@ out_free:
 	free_pages((unsigned long)ldt, order);
 }
 
-long init_new_ldt(struct mmu_context_skas * new_mm,
-		  struct mmu_context_skas * from_mm)
+long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
 {
 	struct user_desc desc;
 	short * num_p;
@@ -483,7 +482,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
 }
 
 
-void free_ldt(struct mmu_context_skas * mm)
+void free_ldt(struct mm_context *mm)
 {
 	int i;
 
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 58ae06562b4a..86f6b18410ee 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -30,7 +30,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 {
 	unsigned long *ptr = addr, tmp;
 	long ret;
-	int pid = task->mm->context.skas.id.u.pid;
+	int pid = task->mm->context.id.u.pid;
 
 	/*
 	 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
index 96f82a456ce6..b2553f3e87eb 100644
--- a/include/asm-um/ldt.h
+++ b/include/asm-um/ldt.h
@@ -11,11 +11,7 @@
 #include "asm/semaphore.h"
 #include "asm/host_ldt.h"
 
-struct mmu_context_skas;
 extern void ldt_host_info(void);
-extern long init_new_ldt(struct mmu_context_skas * to_mm,
-			 struct mmu_context_skas * from_mm);
-extern void free_ldt(struct mmu_context_skas * mm);
 
 #define LDT_PAGES_MAX \
 	((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 035fd1c363ea..5f3b863aef9a 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -29,7 +29,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * possible.
 	 */
 	if (old != new && (current->flags & PF_BORROWED_MM))
-		__switch_mm(&new->context.skas.id);
+		__switch_mm(&new->context.id);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -41,7 +41,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		cpu_clear(cpu, prev->cpu_vm_mask);
 		cpu_set(cpu, next->cpu_vm_mask);
 		if(next != &init_mm)
-			__switch_mm(&next->context.skas.id);
+			__switch_mm(&next->context.id);
 	}
 }
 