author    | Bodo Stroesser <bstroesser@fujitsu-siemens.com> | 2005-09-03 18:57:49 -0400
committer | Linus Torvalds <torvalds@evo.osdl.org>          | 2005-09-05 03:06:24 -0400
commit    | 8b51304ed3184826fb262c1e9d3e58b0b00fd083 (patch)
tree      | 2fd338bf425794146ba4d8b1a2fb3a81fb8c3fa4 /arch/um/kernel/skas/mmu.c
parent    | 60d339f6fe0831060600c62418b71a62ad26c281 (diff)
[PATCH] uml: increase granularity of host capability checking
This change enables SKAS0/SKAS3 to work with all combinations of /proc/mm and
PTRACE_FAULTINFO being available or not.
Also, it changes the initialization of proc_mm and ptrace_faultinfo slightly,
to make it easier to force SKAS0 operation on a patched host. Forcing UML to
run without /proc/mm or PTRACE_FAULTINFO via a command-line parameter can then
be implemented with a setup handler that simply resets the related variable,
as sketched below.
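To illustrate that last point, such an override could look roughly like the
following. This is a sketch only, not part of this patch: the switch names
("noprocmm", "noptracefaultinfo") are assumptions made for illustration, and it
assumes UML's __uml_setup() helper from "init.h" plus visible declarations of
proc_mm and ptrace_faultinfo; the only idea taken from the commit message is
that the handler merely resets the related variable.

#include "init.h"	/* __uml_setup(); proc_mm and ptrace_faultinfo are
			 * assumed to be declared in the skas headers */

/* Illustrative sketch -- switch names and help text are assumptions. */
static int __init noprocmm_cmd_param(char *str, int *add)
{
	proc_mm = 0;	/* pretend the host has no /proc/mm */
	return 0;
}

__uml_setup("noprocmm", noprocmm_cmd_param,
"noprocmm\n"
"    Turns off usage of /proc/mm, even if the host supports it.\n\n");

static int __init noptracefaultinfo_cmd_param(char *str, int *add)
{
	ptrace_faultinfo = 0;	/* pretend the host lacks PTRACE_FAULTINFO */
	return 0;
}

__uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
"noptracefaultinfo\n"
"    Turns off usage of PTRACE_FAULTINFO, even if the host supports it.\n\n");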
Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/kernel/skas/mmu.c')
-rw-r--r-- | arch/um/kernel/skas/mmu.c | 52
1 file changed, 28 insertions, 24 deletions
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d232daa42c31..d837223e22af 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -77,23 +77,14 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 	struct mm_struct *cur_mm = current->mm;
 	struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
 	struct mm_id *mm_id = &mm->context.skas.id;
-	unsigned long stack;
-	int from, ret;
+	unsigned long stack = 0;
+	int from, ret = -ENOMEM;
 
-	if(proc_mm){
-		if((cur_mm != NULL) && (cur_mm != &init_mm))
-			from = cur_mm->context.skas.id.u.mm_fd;
-		else from = -1;
+	if(!proc_mm || !ptrace_faultinfo){
+		stack = get_zeroed_page(GFP_KERNEL);
+		if(stack == 0)
+			goto out;
 
-		ret = new_mm(from);
-		if(ret < 0){
-			printk("init_new_context_skas - new_mm failed, "
-			       "errno = %d\n", ret);
-			return ret;
-		}
-		mm_id->u.mm_fd = ret;
-	}
-	else {
 		/* This zeros the entry that pgd_alloc didn't, needed since
 		 * we are about to reinitialize it, and want mm.nr_ptes to
 		 * be accurate.
@@ -103,20 +94,30 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
 				    (unsigned long) &__syscall_stub_start);
 		if(ret)
-			goto out;
-
-		ret = -ENOMEM;
-		stack = get_zeroed_page(GFP_KERNEL);
-		if(stack == 0)
-			goto out;
-		mm_id->stack = stack;
+			goto out_free;
 
 		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
 		if(ret)
 			goto out_free;
 
 		mm->nr_ptes--;
+	}
+	mm_id->stack = stack;
 
+	if(proc_mm){
+		if((cur_mm != NULL) && (cur_mm != &init_mm))
+			from = cur_mm_id->u.mm_fd;
+		else from = -1;
+
+		ret = new_mm(from, stack);
+		if(ret < 0){
+			printk("init_new_context_skas - new_mm failed, "
+			       "errno = %d\n", ret);
+			goto out_free;
+		}
+		mm_id->u.mm_fd = ret;
+	}
+	else {
 		if((cur_mm != NULL) && (cur_mm != &init_mm))
 			mm_id->u.pid = copy_context_skas0(stack,
 							  cur_mm_id->u.pid);
@@ -126,7 +127,8 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
 	return 0;
 
  out_free:
-	free_page(mm_id->stack);
+	if(mm_id->stack != 0)
+		free_page(mm_id->stack);
  out:
 	return ret;
 }
@@ -137,8 +139,10 @@ void destroy_context_skas(struct mm_struct *mm)
 
 	if(proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
-	else {
+	else
 		os_kill_ptraced_process(mmu->id.u.pid, 1);
+
+	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
 		free_page(mmu->last_page_table);
 	}