Diffstat (limited to 'arch/i386/kernel/vm86.c')
-rw-r--r--   arch/i386/kernel/vm86.c | 33
1 files changed, 17 insertions, 16 deletions
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index be2f96e67f78..d1b8f2b7aea6 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -96,12 +96,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
 {
         int ret = 0;
 
-        /* kernel_vm86_regs is missing xfs, so copy everything up to
-           (but not including) xgs, and then rest after xgs. */
-        ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
-        ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
+        /* kernel_vm86_regs is missing xgs, so copy everything up to
+           (but not including) orig_eax, and then rest including orig_eax. */
+        ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+        ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
                             sizeof(struct kernel_vm86_regs) -
-                            offsetof(struct kernel_vm86_regs, pt.xgs));
+                            offsetof(struct kernel_vm86_regs, pt.orig_eax));
 
         return ret;
 }
@@ -113,12 +113,13 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
 {
         int ret = 0;
 
-        ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
-        ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
+        /* copy eax-xfs inclusive */
+        ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
+        /* copy orig_eax-__gsh+extra */
+        ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
                               sizeof(struct kernel_vm86_regs) -
-                              offsetof(struct kernel_vm86_regs, pt.xgs) +
+                              offsetof(struct kernel_vm86_regs, pt.orig_eax) +
                               extra);
-
         return ret;
 }
 
@@ -157,8 +158,8 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 
         ret = KVM86->regs32;
 
-        loadsegment(fs, current->thread.saved_fs);
-        ret->xgs = current->thread.saved_gs;
+        ret->xfs = current->thread.saved_fs;
+        loadsegment(gs, current->thread.saved_gs);
 
         return ret;
 }
@@ -285,9 +286,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
  */
         info->regs.pt.xds = 0;
         info->regs.pt.xes = 0;
-        info->regs.pt.xgs = 0;
+        info->regs.pt.xfs = 0;
 
-/* we are clearing fs later just before "jmp resume_userspace",
+/* we are clearing gs later just before "jmp resume_userspace",
  * because it is not saved/restored.
  */
 
@@ -321,8 +322,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
  */
         info->regs32->eax = 0;
         tsk->thread.saved_esp0 = tsk->thread.esp0;
-        savesegment(fs, tsk->thread.saved_fs);
-        tsk->thread.saved_gs = info->regs32->xgs;
+        tsk->thread.saved_fs = info->regs32->xfs;
+        savesegment(gs, tsk->thread.saved_gs);
 
         tss = &per_cpu(init_tss, get_cpu());
         tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -342,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
         __asm__ __volatile__(
                 "movl %0,%%esp\n\t"
                 "movl %1,%%ebp\n\t"
-                "mov %2, %%fs\n\t"
+                "mov %2, %%gs\n\t"
                 "jmp resume_userspace"
                 : /* no outputs */
                 :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));