author		Roland McGrath <roland@redhat.com>	2008-01-30 07:30:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:46 -0500
commit		efd1ca52d04d2f6df337a3332cee56cd60e6d4c4 (patch)
tree		cf1e630d25cc45f399388f5fc996d86cf3bcf9ff /arch/x86
parent		13abd0e50433092c41551bc13c32268028b6d663 (diff)
x86: TLS cleanup
This consolidates the four different places that implemented the same
encoding magic for the GDT-slot 32-bit TLS support.  The old tls32.c was
renamed and is now only slightly modified to be the shared implementation.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Zachary Amsden <zach@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
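For reference, the interface being consolidated is the one user space reaches through the set_thread_area()/get_thread_area() system calls. The sketch below is not part of the patch; it is a minimal userspace program, assuming a 32-bit x86 Linux target where <asm/ldt.h> provides struct user_desc, that exercises the "pick a free GDT TLS slot" path (entry_number == -1) and then reads the descriptor back:

/* Minimal sketch (not from the patch): allocate a TLS GDT slot via
 * set_thread_area() and read it back via get_thread_area().
 * Assumes a 32-bit x86 Linux build with <asm/ldt.h> available. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc set_desc, get_desc;

	memset(&set_desc, 0, sizeof(set_desc));
	set_desc.entry_number   = -1;		/* let the kernel pick a free TLS entry */
	set_desc.base_addr      = 0x1000;	/* arbitrary demo base address */
	set_desc.limit          = 0xfffff;
	set_desc.seg_32bit      = 1;
	set_desc.limit_in_pages = 1;
	set_desc.useable        = 1;

	if (syscall(SYS_set_thread_area, &set_desc) != 0) {
		perror("set_thread_area");
		return 1;
	}
	/* The kernel writes the chosen slot back into entry_number. */
	printf("allocated TLS entry %u\n", set_desc.entry_number);

	memset(&get_desc, 0, sizeof(get_desc));
	get_desc.entry_number = set_desc.entry_number;
	if (syscall(SYS_get_thread_area, &get_desc) != 0) {
		perror("get_thread_area");
		return 1;
	}
	printf("base=%#x limit=%#x seg_32bit=%d\n",
	       get_desc.base_addr, get_desc.limit, get_desc.seg_32bit);
	return 0;
}

In the patch itself, sys_set_thread_area() becomes do_set_thread_area(current, -1, u_info, 1), while the copy_thread() and ptrace callers pass an explicit index with can_allocate == 0, so only the syscall path may allocate a new slot.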
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/ia32/ia32entry.S	|   4
-rw-r--r--	arch/x86/kernel/Makefile_32	|   1
-rw-r--r--	arch/x86/kernel/process_32.c	| 141
-rw-r--r--	arch/x86/kernel/process_64.c	|   3
-rw-r--r--	arch/x86/kernel/ptrace_32.c	|  91
-rw-r--r--	arch/x86/kernel/ptrace_64.c	|  26
-rw-r--r--	arch/x86/kernel/tls.c		|  96
7 files changed, 66 insertions, 296 deletions
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 2499a324feaa..0db0a6291bbd 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -643,8 +643,8 @@ ia32_sys_call_table:
 	.quad compat_sys_futex		/* 240 */
 	.quad compat_sys_sched_setaffinity
 	.quad compat_sys_sched_getaffinity
-	.quad sys32_set_thread_area
-	.quad sys32_get_thread_area
+	.quad sys_set_thread_area
+	.quad sys_get_thread_area
 	.quad compat_sys_io_setup	/* 245 */
 	.quad sys_io_destroy
 	.quad compat_sys_io_getevents
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index 2c9596b9349c..9a6577a746ba 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -10,6 +10,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
 		pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
 		quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o io_delay.o rtc.o
 
+obj-y += tls.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += cpu/
 obj-y += acpi/
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 631af167bc51..4d66a56280d3 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -501,32 +501,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
 	}
 
+	err = 0;
+
 	/*
 	 * Set a new TLS for the child thread?
 	 */
-	if (clone_flags & CLONE_SETTLS) {
-		struct desc_struct *desc;
-		struct user_desc info;
-		int idx;
-
-		err = -EFAULT;
-		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-			goto out;
-		err = -EINVAL;
-		if (LDT_empty(&info))
-			goto out;
-
-		idx = info.entry_number;
-		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-			goto out;
-
-		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
+	if (clone_flags & CLONE_SETTLS)
+		err = do_set_thread_area(p, -1,
+			(struct user_desc __user *)childregs->esi, 0);
 
-	err = 0;
- out:
 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
@@ -872,120 +855,6 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
-	struct thread_struct *t = &current->thread;
-	int idx;
-
-	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-		if (desc_empty(t->tls_array + idx))
-			return idx + GDT_ENTRY_TLS_MIN;
-	return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-{
-	struct thread_struct *t = &current->thread;
-	struct user_desc info;
-	struct desc_struct *desc;
-	int cpu, idx;
-
-	if (copy_from_user(&info, u_info, sizeof(info)))
-		return -EFAULT;
-	idx = info.entry_number;
-
-	/*
-	 * index -1 means the kernel should try to find and
-	 * allocate an empty descriptor:
-	 */
-	if (idx == -1) {
-		idx = get_free_idx();
-		if (idx < 0)
-			return idx;
-		if (put_user(idx, &u_info->entry_number))
-			return -EFAULT;
-	}
-
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	/*
-	 * We must not get preempted while modifying the TLS.
-	 */
-	cpu = get_cpu();
-
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-	load_TLS(t, cpu);
-
-	put_cpu();
-
-	return 0;
-}
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
-	(((desc)->a >> 16) & 0x0000ffff) | \
-	(((desc)->b << 16) & 0x00ff0000) | \
-	( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
-	((desc)->a & 0x0ffff) | \
-	((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
-
-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-{
-	struct user_desc info;
-	struct desc_struct *desc;
-	int idx;
-
-	if (get_user(idx, &u_info->entry_number))
-		return -EFAULT;
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	memset(&info, 0, sizeof(info));
-
-	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
-
-	if (copy_to_user(u_info, &info, sizeof(info)))
-		return -EFAULT;
-	return 0;
-}
-
 unsigned long arch_align_stack(unsigned long sp)
 {
 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9ea1d7546f80..ccc9d68d5a58 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -524,7 +524,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	if (clone_flags & CLONE_SETTLS) {
 #ifdef CONFIG_IA32_EMULATION
 		if (test_thread_flag(TIF_IA32))
-			err = ia32_child_tls(p, childregs);
+			err = do_set_thread_area(p, -1,
+				(struct user_desc __user *)childregs->rsi, 0);
 		else
 #endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index ff5431cc03ee..09227cfb7c4c 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -276,85 +276,6 @@ void ptrace_disable(struct task_struct *child)
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 }
 
-/*
- * Perform get_thread_area on behalf of the traced child.
- */
-static int
-ptrace_get_thread_area(struct task_struct *child,
-		       int idx, struct user_desc __user *user_desc)
-{
-	struct user_desc info;
-	struct desc_struct *desc;
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
-	(((desc)->a >> 16) & 0x0000ffff) | \
-	(((desc)->b << 16) & 0x00ff0000) | \
-	( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
-	((desc)->a & 0x0ffff) | \
-	((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
-
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
-
-	if (copy_to_user(user_desc, &info, sizeof(info)))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * Perform set_thread_area on behalf of the traced child.
- */
-static int
-ptrace_set_thread_area(struct task_struct *child,
-		       int idx, struct user_desc __user *user_desc)
-{
-	struct user_desc info;
-	struct desc_struct *desc;
-
-	if (copy_from_user(&info, user_desc, sizeof(info)))
-		return -EFAULT;
-
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
-	}
-
-	return 0;
-}
-
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	struct user * dummy = NULL;
@@ -601,13 +522,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	}
 
 	case PTRACE_GET_THREAD_AREA:
-		ret = ptrace_get_thread_area(child, addr,
-					(struct user_desc __user *) data);
+		if (addr < 0)
+			return -EIO;
+		ret = do_get_thread_area(child, addr,
+					 (struct user_desc __user *) data);
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
-		ret = ptrace_set_thread_area(child, addr,
-					(struct user_desc __user *) data);
+		if (addr < 0)
+			return -EIO;
+		ret = do_set_thread_area(child, addr,
+					 (struct user_desc __user *) data, 0);
 		break;
 
 	default:
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 1edece36044c..375fadc23a25 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -474,23 +474,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	   64bit debugger to fully examine them too. Better
 	   don't use it against 64bit processes, use
 	   PTRACE_ARCH_PRCTL instead. */
-	case PTRACE_SET_THREAD_AREA: {
-		struct user_desc __user *p;
-		int old;
-		p = (struct user_desc __user *)data;
-		get_user(old, &p->entry_number);
-		put_user(addr, &p->entry_number);
-		ret = do_set_thread_area(&child->thread, p);
-		put_user(old, &p->entry_number);
-		break;
 	case PTRACE_GET_THREAD_AREA:
-		p = (struct user_desc __user *)data;
-		get_user(old, &p->entry_number);
-		put_user(addr, &p->entry_number);
-		ret = do_get_thread_area(&child->thread, p);
-		put_user(old, &p->entry_number);
+		if (addr < 0)
+			return -EIO;
+		ret = do_get_thread_area(child, addr,
+					 (struct user_desc __user *) data);
+
+		break;
+	case PTRACE_SET_THREAD_AREA:
+		if (addr < 0)
+			return -EIO;
+		ret = do_set_thread_area(child, addr,
+					 (struct user_desc __user *) data, 0);
 		break;
-	}
 #endif
 	/* normal 64bit interface to access TLS data.
 	   Works just like arch_prctl, except that the arguments
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 5291596f19b0..67a377621b12 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -19,31 +19,34 @@ static int get_free_idx(void)
 	int idx;
 
 	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-		if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+		if (desc_empty(&t->tls_array[idx]))
 			return idx + GDT_ENTRY_TLS_MIN;
 	return -ESRCH;
 }
 
 /*
  * Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
  */
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+int do_set_thread_area(struct task_struct *p, int idx,
+		       struct user_desc __user *u_info,
+		       int can_allocate)
 {
+	struct thread_struct *t = &p->thread;
 	struct user_desc info;
-	struct n_desc_struct *desc;
-	int cpu, idx;
+	u32 *desc;
+	int cpu;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
 
-	idx = info.entry_number;
+	if (idx == -1)
+		idx = info.entry_number;
 
 	/*
 	 * index -1 means the kernel should try to find and
 	 * allocate an empty descriptor:
 	 */
-	if (idx == -1) {
+	if (idx == -1 && can_allocate) {
 		idx = get_free_idx();
 		if (idx < 0)
 			return idx;
@@ -54,7 +57,7 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+	desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
 
 	/*
 	 * We must not get preempted while modifying the TLS.
@@ -62,11 +65,11 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	cpu = get_cpu();
 
 	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
+		desc[0] = 0;
+		desc[1] = 0;
 	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
+		desc[0] = LDT_entry_a(&info);
+		desc[1] = LDT_entry_b(&info);
 	}
 	if (t == &current->thread)
 		load_TLS(t, cpu);
@@ -75,9 +78,9 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	return 0;
 }
 
-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 {
-	return do_set_thread_area(&current->thread, u_info);
+	return do_set_thread_area(current, -1, u_info, 1);
 }
 
 
@@ -85,34 +88,32 @@ asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
  * Get the current Thread-Local Storage area:
  */
 
-#define GET_LIMIT(desc) ( \
-	((desc)->a & 0x0ffff) | \
-	((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc)	(((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+#define GET_LIMIT(desc)		(((desc)[0] & 0x0ffff) | ((desc)[1] & 0xf0000))
+#define GET_32BIT(desc)		(((desc)[1] >> 22) & 1)
+#define GET_CONTENTS(desc)	(((desc)[1] >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)[1] >> 9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)[1] >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)[1] >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)[1] >> 20) & 1)
+#define GET_LONGMODE(desc)	(((desc)[1] >> 21) & 1)
+
+int do_get_thread_area(struct task_struct *p, int idx,
+		       struct user_desc __user *u_info)
 {
+	struct thread_struct *t = &p->thread;
 	struct user_desc info;
-	struct n_desc_struct *desc;
-	int idx;
+	u32 *desc;
 
-	if (get_user(idx, &u_info->entry_number))
+	if (idx == -1 && get_user(idx, &u_info->entry_number))
 		return -EFAULT;
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+	desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
 
 	memset(&info, 0, sizeof(struct user_desc));
 	info.entry_number = idx;
-	info.base_addr = get_desc_base(desc);
+	info.base_addr = get_desc_base((void *)desc);
 	info.limit = GET_LIMIT(desc);
 	info.seg_32bit = GET_32BIT(desc);
 	info.contents = GET_CONTENTS(desc);
@@ -120,39 +121,16 @@ int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	info.limit_in_pages = GET_LIMIT_PAGES(desc);
 	info.seg_not_present = !GET_PRESENT(desc);
 	info.useable = GET_USEABLE(desc);
+#ifdef CONFIG_X86_64
 	info.lm = GET_LONGMODE(desc);
+#endif
 
 	if (copy_to_user(u_info, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 {
-	return do_get_thread_area(&current->thread, u_info);
-}
-
-
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
-{
-	struct n_desc_struct *desc;
-	struct user_desc info;
-	struct user_desc __user *cp;
-	int idx;
-
-	cp = (void __user *)childregs->rsi;
-	if (copy_from_user(&info, cp, sizeof(info)))
-		return -EFAULT;
-	if (LDT_empty(&info))
-		return -EINVAL;
-
-	idx = info.entry_number;
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
-	desc->a = LDT_entry_a(&info);
-	desc->b = LDT_entry_b(&info);
-
-	return 0;
+	return do_get_thread_area(current, -1, u_info);
 }