Diffstat (limited to 'arch/um/sys-i386/tls.c')
 arch/um/sys-i386/tls.c | 74 ++++++++++++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index 6cb7cbd137a0..b02266ab5c55 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -3,19 +3,12 @@
  * Licensed under the GPL
  */
 
-#include "linux/kernel.h"
+#include "linux/percpu.h"
 #include "linux/sched.h"
-#include "linux/slab.h"
-#include "linux/types.h"
 #include "asm/uaccess.h"
-#include "asm/ptrace.h"
-#include "asm/segment.h"
-#include "asm/smp.h"
-#include "asm/desc.h"
-#include "kern.h"
-#include "kern_util.h"
 #include "os.h"
 #include "skas.h"
+#include "sysdep/tls.h"
 
 /*
  * If needed we can detect when it's uninitialized.
@@ -74,7 +67,8 @@ static inline void clear_user_desc(struct user_desc* info)
 	/* Postcondition: LDT_empty(info) returns true. */
 	memset(info, 0, sizeof(*info));
 
-	/* Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
+	/*
+	 * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
 	 * indeed an empty user_desc.
 	 */
 	info->read_exec_only = 1;
@@ -89,10 +83,13 @@ static int load_TLS(int flags, struct task_struct *to)
 	int idx;
 
 	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
-		struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
+		struct uml_tls_struct* curr =
+			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
 
-		/* Actually, now if it wasn't flushed it gets cleared and
-		 * flushed to the host, which will clear it.*/
+		/*
+		 * Actually, now if it wasn't flushed it gets cleared and
+		 * flushed to the host, which will clear it.
+		 */
 		if (!curr->present) {
 			if (!curr->flushed) {
 				clear_user_desc(&curr->tls);
@@ -116,7 +113,8 @@ out:
 	return ret;
 }
 
-/* Verify if we need to do a flush for the new process, i.e. if there are any
+/*
+ * Verify if we need to do a flush for the new process, i.e. if there are any
  * present desc's, only if they haven't been flushed.
  */
 static inline int needs_TLS_update(struct task_struct *task)
@@ -125,10 +123,13 @@ static inline int needs_TLS_update(struct task_struct *task)
 	int ret = 0;
 
 	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
-		struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+		struct uml_tls_struct* curr =
+			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
 
-		/* Can't test curr->present, we may need to clear a descriptor
-		 * which had a value. */
+		/*
+		 * Can't test curr->present, we may need to clear a descriptor
+		 * which had a value.
+		 */
 		if (curr->flushed)
 			continue;
 		ret = 1;
@@ -137,7 +138,8 @@ static inline int needs_TLS_update(struct task_struct *task)
 	return ret;
 }
 
-/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
+/*
+ * On a newly forked process, the TLS descriptors haven't yet been flushed. So
  * we mark them as such and the first switch_to will do the job.
  */
 void clear_flushed_tls(struct task_struct *task)
@@ -145,10 +147,13 @@ void clear_flushed_tls(struct task_struct *task)
 	int i;
 
 	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
-		struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+		struct uml_tls_struct* curr =
+			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
 
-		/* Still correct to do this, if it wasn't present on the host it
-		 * will remain as flushed as it was. */
+		/*
+		 * Still correct to do this, if it wasn't present on the host it
+		 * will remain as flushed as it was.
+		 */
 		if (!curr->present)
 			continue;
 
@@ -156,23 +161,27 @@ void clear_flushed_tls(struct task_struct *task)
 	}
 }
 
-/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
+/*
+ * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
  * common host process. So this is needed in SKAS0 too.
  *
  * However, if each thread had a different host process (and this was discussed
  * for SMP support) this won't be needed.
  *
  * And this will not need be used when (and if) we'll add support to the host
- * SKAS patch. */
+ * SKAS patch.
+ */
 
 int arch_switch_tls(struct task_struct *from, struct task_struct *to)
 {
 	if (!host_supports_tls)
 		return 0;
 
-	/* We have no need whatsoever to switch TLS for kernel threads; beyond
+	/*
+	 * We have no need whatsoever to switch TLS for kernel threads; beyond
 	 * that, that would also result in us calling os_set_thread_area with
-	 * userspace_pid[cpu] == 0, which gives an error. */
+	 * userspace_pid[cpu] == 0, which gives an error.
+	 */
 	if (likely(to->mm))
 		return load_TLS(O_FORCE, to);
 
@@ -232,17 +241,20 @@ static int get_tls_entry(struct task_struct* task, struct user_desc *info, int i
 	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
 
 out:
-	/* Temporary debugging check, to make sure that things have been
+	/*
+	 * Temporary debugging check, to make sure that things have been
 	 * flushed. This could be triggered if load_TLS() failed.
 	 */
-	if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+	if (unlikely(task == current &&
+		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
 		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
 				"without flushed TLS.", current->pid);
 	}
 
 	return 0;
 clear:
-	/* When the TLS entry has not been set, the values read to user in the
+	/*
+	 * When the TLS entry has not been set, the values read to user in the
 	 * tls_array are 0 (because it's cleared at boot, see
 	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
 	 */
@@ -344,8 +356,10 @@ out:
 }
 
 
-/* XXX: This part is probably common to i386 and x86-64. Don't create a common
- * file for now, do that when implementing x86-64 support.*/
+/*
+ * XXX: This part is probably common to i386 and x86-64. Don't create a common
+ * file for now, do that when implementing x86-64 support.
+ */
 static int __init __setup_host_supports_tls(void)
 {
 	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
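
For reference, the present and flushed flags that this patch's comments keep
referring to come from the per-entry TLS bookkeeping provided by the newly
included "sysdep/tls.h". A minimal sketch of that structure, assuming the
sysdep-i386 layout of this era (the exact field order is reconstructed, but
the fields match what the code above dereferences via curr->tls,
curr->flushed, and curr->present):

	struct uml_tls_struct {
		struct user_desc tls;	/* guest-visible descriptor contents */
		unsigned flushed:1;	/* entry has been pushed to the host */
		unsigned present:1;	/* entry is valid from the guest's view */
	};

Each task carries one of these per TLS slot in thread.arch.tls_array;
load_TLS() walks the array, pushes any unflushed entry to the host (via
os_set_thread_area(), as the arch_switch_tls() comment notes), and marks it
flushed.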