diff options
Diffstat (limited to 'arch/um/kernel/skas/mmu.c')
-rw-r--r-- | arch/um/kernel/skas/mmu.c | 136 |
1 file changed, 114 insertions, 22 deletions
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 6cb9a6d028a9..511a855c9ec0 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c | |||
@@ -3,46 +3,138 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/config.h" | ||
6 | #include "linux/sched.h" | 7 | #include "linux/sched.h" |
7 | #include "linux/list.h" | 8 | #include "linux/list.h" |
8 | #include "linux/spinlock.h" | 9 | #include "linux/spinlock.h" |
9 | #include "linux/slab.h" | 10 | #include "linux/slab.h" |
11 | #include "linux/errno.h" | ||
12 | #include "linux/mm.h" | ||
10 | #include "asm/current.h" | 13 | #include "asm/current.h" |
11 | #include "asm/segment.h" | 14 | #include "asm/segment.h" |
12 | #include "asm/mmu.h" | 15 | #include "asm/mmu.h" |
16 | #include "asm/pgalloc.h" | ||
17 | #include "asm/pgtable.h" | ||
13 | #include "os.h" | 18 | #include "os.h" |
14 | #include "skas.h" | 19 | #include "skas.h" |
15 | 20 | ||
21 | extern int __syscall_stub_start; | ||
22 | |||
23 | static int init_stub_pte(struct mm_struct *mm, unsigned long proc, | ||
24 | unsigned long kernel) | ||
25 | { | ||
26 | pgd_t *pgd; | ||
27 | pud_t *pud; | ||
28 | pmd_t *pmd; | ||
29 | pte_t *pte; | ||
30 | |||
31 | spin_lock(&mm->page_table_lock); | ||
32 | pgd = pgd_offset(mm, proc); | ||
33 | pud = pud_alloc(mm, pgd, proc); | ||
34 | if (!pud) | ||
35 | goto out; | ||
36 | |||
37 | pmd = pmd_alloc(mm, pud, proc); | ||
38 | if (!pmd) | ||
39 | goto out_pmd; | ||
40 | |||
41 | pte = pte_alloc_map(mm, pmd, proc); | ||
42 | if (!pte) | ||
43 | goto out_pte; | ||
44 | |||
45 | /* There's an interaction between the skas0 stub pages, stack | ||
46 | * randomization, and the BUG at the end of exit_mmap. exit_mmap | ||
47 | * checks that the number of page tables freed is the same as had | ||
48 | * been allocated. If the stack is on the last page table page, | ||
49 | * then the stack pte page will be freed, and if not, it won't. To | ||
50 | * avoid having to know where the stack is, or if the process mapped | ||
51 | * something at the top of its address space for some other reason, | ||
52 | * we set TASK_SIZE to end at the start of the last page table. | ||
53 | * This keeps exit_mmap off the last page, but introduces a leak | ||
54 | * of that page. So, we hang onto it here and free it in | ||
55 | * destroy_context_skas. | ||
56 | */ | ||
57 | |||
58 | mm->context.skas.last_page_table = pmd_page_kernel(*pmd); | ||
59 | |||
60 | *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); | ||
61 | *pte = pte_mkexec(*pte); | ||
62 | *pte = pte_wrprotect(*pte); | ||
63 | spin_unlock(&mm->page_table_lock); | ||
64 | return(0); | ||
65 | |||
66 | out_pmd: | ||
67 | pud_free(pud); | ||
68 | out_pte: | ||
69 | pmd_free(pmd); | ||
70 | out: | ||
71 | spin_unlock(&mm->page_table_lock); | ||
72 | return(-ENOMEM); | ||
73 | } | ||
74 | |||
16 | int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) | 75 | int init_new_context_skas(struct task_struct *task, struct mm_struct *mm) |
17 | { | 76 | { |
18 | int from; | 77 | struct mm_struct *cur_mm = current->mm; |
78 | struct mm_id *mm_id = &mm->context.skas.id; | ||
79 | unsigned long stack; | ||
80 | int from, ret; | ||
19 | 81 | ||
20 | if((current->mm != NULL) && (current->mm != &init_mm)) | 82 | if(proc_mm){ |
21 | from = current->mm->context.skas.mm_fd; | 83 | if((cur_mm != NULL) && (cur_mm != &init_mm)) |
22 | else from = -1; | 84 | from = cur_mm->context.skas.id.u.mm_fd; |
85 | else from = -1; | ||
23 | 86 | ||
24 | mm->context.skas.mm_fd = new_mm(from); | 87 | ret = new_mm(from); |
25 | if(mm->context.skas.mm_fd < 0){ | 88 | if(ret < 0){ |
26 | printk("init_new_context_skas - new_mm failed, errno = %d\n", | 89 | printk("init_new_context_skas - new_mm failed, " |
27 | mm->context.skas.mm_fd); | 90 | "errno = %d\n", ret); |
28 | return(mm->context.skas.mm_fd); | 91 | return ret; |
92 | } | ||
93 | mm_id->u.mm_fd = ret; | ||
29 | } | 94 | } |
95 | else { | ||
96 | /* This zeros the entry that pgd_alloc didn't, needed since | ||
97 | * we are about to reinitialize it, and want mm.nr_ptes to | ||
98 | * be accurate. | ||
99 | */ | ||
100 | mm->pgd[USER_PTRS_PER_PGD] = __pgd(0); | ||
30 | 101 | ||
31 | return(0); | 102 | ret = init_stub_pte(mm, CONFIG_STUB_CODE, |
103 | (unsigned long) &__syscall_stub_start); | ||
104 | if(ret) | ||
105 | goto out; | ||
106 | |||
107 | ret = -ENOMEM; | ||
108 | stack = get_zeroed_page(GFP_KERNEL); | ||
109 | if(stack == 0) | ||
110 | goto out; | ||
111 | mm_id->stack = stack; | ||
112 | |||
113 | ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack); | ||
114 | if(ret) | ||
115 | goto out_free; | ||
116 | |||
117 | mm->nr_ptes--; | ||
118 | mm_id->u.pid = start_userspace(stack); | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | |||
123 | out_free: | ||
124 | free_page(mm_id->stack); | ||
125 | out: | ||
126 | return ret; | ||
32 | } | 127 | } |
33 | 128 | ||
34 | void destroy_context_skas(struct mm_struct *mm) | 129 | void destroy_context_skas(struct mm_struct *mm) |
35 | { | 130 | { |
36 | os_close_file(mm->context.skas.mm_fd); | 131 | struct mmu_context_skas *mmu = &mm->context.skas; |
37 | } | ||
38 | 132 | ||
39 | /* | 133 | if(proc_mm) |
40 | * Overrides for Emacs so that we follow Linus's tabbing style. | 134 | os_close_file(mmu->id.u.mm_fd); |
41 | * Emacs will notice this stuff at the end of the file and automatically | 135 | else { |
42 | * adjust the settings for this buffer only. This must remain at the end | 136 | os_kill_ptraced_process(mmu->id.u.pid, 1); |
43 | * of the file. | 137 | free_page(mmu->id.stack); |
44 | * --------------------------------------------------------------------------- | 138 | free_page(mmu->last_page_table); |
45 | * Local variables: | 139 | } |
46 | * c-file-style: "linux" | 140 | } |
47 | * End: | ||
48 | */ | ||