author		Jeff Dike <jdike@addtoit.com>	2007-10-16 04:27:06 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:06 -0400
commit		6c738ffa9fea6869f5d51882dfefbba746e432b1 (patch)
tree		e9b30ccd149f73676422ea5519d6572a3f8e2819 /arch/um/kernel
parent		fab95c55e3b94e219044dc7a558632d08c198771 (diff)
uml: fold mmu_context_skas into mm_context
This patch folds mmu_context_skas into struct mm_context, changing all
users of these structures as needed.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
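Put differently, the per-mm SKAS state that used to be reached through an
extra .skas member now lives directly in struct mm_context, so accesses
like mm->context.skas.id become mm->context.id. The sketch below is only an
illustration of that shape, pieced together from the field names visible in
the hunks in this diff (id, last_page_table, last_pmd); the actual header
change lives outside arch/um/kernel and is not part of this diffstat, so
the surrounding layout here is approximate.

/*
 * Illustrative sketch only -- field names taken from the hunks below;
 * the real header edits are not shown by this patch.
 */

/* Before: the fields hid behind a union wrapping a skas-specific struct. */
struct mmu_context_skas {
	struct mm_id id;
	unsigned long last_page_table;
#ifdef CONFIG_3_LEVEL_PGTABLES
	unsigned long last_pmd;
#endif
};

union mm_context {
	struct mmu_context_skas skas;	/* callers wrote mm->context.skas.id */
};

/* After: the same fields sit directly in struct mm_context. */
struct mm_context {
	struct mm_id id;		/* callers now write mm->context.id */
	unsigned long last_page_table;
#ifdef CONFIG_3_LEVEL_PGTABLES
	unsigned long last_pmd;
#endif
};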
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--   arch/um/kernel/exec.c           |  4
-rw-r--r--   arch/um/kernel/reboot.c         |  2
-rw-r--r--   arch/um/kernel/skas/mmu.c       | 12
-rw-r--r--   arch/um/kernel/skas/process.c   |  2
-rw-r--r--   arch/um/kernel/tlb.c            | 43
5 files changed, 31 insertions(+), 32 deletions(-)
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 25c502617553..7c77adecd919 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -23,14 +23,14 @@ void flush_thread(void)
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, end, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
 		force_sig(SIGKILL, current);
 	}
 
-	__switch_mm(&current->mm->context.skas.id);
+	__switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 9d8eea47a0fc..04cebcf0679f 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -25,7 +25,7 @@ static void kill_off_processes(void)
 			if(p->mm == NULL)
 				continue;
 
-			pid = p->mm->context.skas.id.u.pid;
+			pid = p->mm->context.id.u.pid;
 			os_kill_ptraced_process(pid, 1);
 		}
 	}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5475ecd9fd4..48c8c136c038 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -47,9 +47,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	 * destroy_context_skas.
 	 */
 
-	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
+	mm->context.last_page_table = pmd_page_vaddr(*pmd);
 #ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
 #endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
@@ -66,8 +66,8 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
-	struct mmu_context_skas *from_mm = NULL;
-	struct mmu_context_skas *to_mm = &mm->context.skas;
+	struct mm_context *from_mm = NULL;
+	struct mm_context *to_mm = &mm->context;
 	unsigned long stack = 0;
 	int ret = -ENOMEM;
 
@@ -97,7 +97,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 	to_mm->id.stack = stack;
 	if (current->mm != NULL && current->mm != &init_mm)
-		from_mm = &current->mm->context.skas;
+		from_mm = &current->mm->context;
 
 	if (proc_mm) {
 		ret = new_mm(stack);
@@ -133,7 +133,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void destroy_context(struct mm_struct *mm)
 {
-	struct mmu_context_skas *mmu = &mm->context.skas;
+	struct mm_context *mmu = &mm->context;
 
 	if (proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 9ce1c49421f8..0297e63f9725 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -65,5 +65,5 @@ unsigned long current_stub_stack(void)
 	if (current->mm == NULL)
 		return 0;
 
-	return current->mm->context.skas.id.stack;
+	return current->mm->context.id.stack;
 }
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 849922fcfb60..081baefb4c0d 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -14,8 +14,8 @@
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		    unsigned int prot, struct host_vm_op *ops, int *index,
-		    int last_filled, union mm_context *mmu, void **flush,
-		    int (*do_ops)(union mm_context *, struct host_vm_op *,
+		    int last_filled, struct mm_context *mmu, void **flush,
+		    int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				  int, int, void **))
 {
 	__u64 offset;
@@ -52,8 +52,8 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 
 static int add_munmap(unsigned long addr, unsigned long len,
 		      struct host_vm_op *ops, int *index, int last_filled,
-		      union mm_context *mmu, void **flush,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      struct mm_context *mmu, void **flush,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	struct host_vm_op *last;
@@ -82,8 +82,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
 
 static int add_mprotect(unsigned long addr, unsigned long len,
 			unsigned int prot, struct host_vm_op *ops, int *index,
-			int last_filled, union mm_context *mmu, void **flush,
-			int (*do_ops)(union mm_context *, struct host_vm_op *,
+			int last_filled, struct mm_context *mmu, void **flush,
+			int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				      int, int, void **))
 {
 	struct host_vm_op *last;
@@ -117,8 +117,8 @@ static int add_mprotect(unsigned long addr, unsigned long len,
 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -157,8 +157,8 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -187,8 +187,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -216,11 +216,11 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	pgd_t *pgd;
-	union mm_context *mmu = &mm->context;
+	struct mm_context *mmu = &mm->context;
 	struct host_vm_op ops[1];
 	unsigned long addr = start_addr, next;
 	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
@@ -375,7 +375,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 		w = 0;
 	}
 
-	mm_id = &mm->context.skas.id;
+	mm_id = &mm->context.id;
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
 	if (pte_newpage(*pte)) {
@@ -453,7 +453,7 @@ void __flush_tlb_one(unsigned long addr)
 	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
-static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
 		  int finished, void **flush)
 {
 	struct host_vm_op *op;
@@ -463,17 +463,16 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 		op = &ops[i];
 		switch(op->type) {
 		case MMAP:
-			ret = map(&mmu->skas.id, op->u.mmap.addr,
-				  op->u.mmap.len, op->u.mmap.prot,
-				  op->u.mmap.fd, op->u.mmap.offset, finished,
-				  flush);
+			ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
+				  op->u.mmap.prot, op->u.mmap.fd,
+				  op->u.mmap.offset, finished, flush);
 			break;
 		case MUNMAP:
-			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
+			ret = unmap(&mmu->id, op->u.munmap.addr,
 				    op->u.munmap.len, finished, flush);
 			break;
 		case MPROTECT:
-			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
+			ret = protect(&mmu->id, op->u.mprotect.addr,
 				      op->u.mprotect.len, op->u.mprotect.prot,
 				      finished, flush);
 			break;