path: root/arch/x86/xen/enlighten.c
author	Tejun Heo <tj@kernel.org>	2009-02-09 08:17:40 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-09 18:42:00 -0500
commit	ccbeed3a05908d201b47b6c3dd1a373138bba566 (patch)
tree	e834d548c70426aa3885dd2169be1a79be4a617b /arch/x86/xen/enlighten.c
parent	d9a89a26e02ef9ed03f74a755a8b4d8f3a066622 (diff)
x86: make lazy %gs optional on x86_32
Impact: pt_regs changed, lazy gs handling made optional, adds slight
overhead to SAVE_ALL, simplifies error_code path a bit

On x86_32, %gs hasn't been used by the kernel and is handled lazily.
pt_regs doesn't have a slot for it, and %gs is saved/loaded only when
necessary.  In preparation for stack protector support, this patch
makes lazy %gs handling optional by doing the following:

* Add CONFIG_X86_32_LAZY_GS and a place for gs in pt_regs.

* Save and restore %gs along with the other registers in entry_32.S
  unless LAZY_GS.  Note that this unfortunately adds "pushl $0" to
  SAVE_ALL even when LAZY_GS.  However, it adds no overhead to the
  common exit path and simplifies the entry path with error code.

* Define different user_gs accessors depending on LAZY_GS and add
  lazy_save_gs() and lazy_load_gs(), which are no-ops if !LAZY_GS.
  The lazy_*_gs() ops are used to save, load and clear %gs lazily.

* Define ELF_CORE_COPY_KERNEL_REGS(), which always reads %gs directly.

The xen and lguest changes need to be verified.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
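For orientation, the accessor split described above roughly works out to the sketch below. This is reconstructed from the commit description, not copied verbatim from the patched header; the exact macro bodies and the reliance on the savesegment()/loadsegment() helpers are assumptions about the x86 headers of that era.

#ifdef CONFIG_X86_32_LAZY_GS
/* Lazy handling: the live %gs register is the source of truth. */
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#define get_user_gs(regs)	(u16)({ unsigned long v; savesegment(gs, v); v; })
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#else	/* !CONFIG_X86_32_LAZY_GS */
/* %gs is saved/restored with the other registers by entry_32.S. */
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#endif	/* CONFIG_X86_32_LAZY_GS */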
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--	arch/x86/xen/enlighten.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 37230342c2c4..95ff6a0e942a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -323,13 +323,14 @@ static void load_TLS_descriptor(struct thread_struct *t,
 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 {
 	/*
-	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
-	 * it means we're in a context switch, and %gs has just been
-	 * saved.  This means we can zero it out to prevent faults on
-	 * exit from the hypervisor if the next process has no %gs.
-	 * Either way, it has been saved, and the new value will get
-	 * loaded properly.  This will go away as soon as Xen has been
-	 * modified to not save/restore %gs for normal hypercalls.
+	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
+	 * and lazy gs handling is enabled, it means we're in a
+	 * context switch, and %gs has just been saved.  This means we
+	 * can zero it out to prevent faults on exit from the
+	 * hypervisor if the next process has no %gs.  Either way, it
+	 * has been saved, and the new value will get loaded properly.
+	 * This will go away as soon as Xen has been modified to not
+	 * save/restore %gs for normal hypercalls.
 	 *
 	 * On x86_64, this hack is not used for %gs, because gs points
 	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
@@ -341,7 +342,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 */
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
 #ifdef CONFIG_X86_32
-		loadsegment(gs, 0);
+		lazy_load_gs(0);
 #else
 		loadsegment(fs, 0);
 #endif
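Read together with the hunks above, the lazy-mode branch ends up looking like this once the patch is applied; the annotations are an interpretation of the commit message and the surrounding comment, not comments present in the file itself.

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		/*
		 * With CONFIG_X86_32_LAZY_GS this still expands to
		 * loadsegment(gs, 0), clearing a stale user %gs so it
		 * cannot fault on exit from the hypervisor if the next
		 * process has no %gs.  Without LAZY_GS it compiles to
		 * nothing: %gs is saved in pt_regs and restored by
		 * entry_32.S, so this path does not need to touch it.
		 */
		lazy_load_gs(0);
#else
		/* x86_64 clears %fs instead; %gs points at KERNEL_GS_BASE. */
		loadsegment(fs, 0);
#endif
	}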