author     Roland McGrath <roland@redhat.com>    2008-01-30 07:30:46 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-01-30 07:30:46 -0500
commit     efd1ca52d04d2f6df337a3332cee56cd60e6d4c4
tree       cf1e630d25cc45f399388f5fc996d86cf3bcf9ff /arch/x86/kernel/tls.c
parent     13abd0e50433092c41551bc13c32268028b6d663
x86: TLS cleanup
This consolidates the four different places that implemented the same
encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c was
renamed and is now only slightly modified to be the shared implementation.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Zachary Amsden <zach@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/tls.c')
-rw-r--r--  arch/x86/kernel/tls.c | 96
1 file changed, 37 insertions(+), 59 deletions(-)
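For reference, here is a small self-contained sketch of the decoding that the GET_*() macros in the diff below perform on the two 32-bit words of a GDT TLS entry. It is an illustration only: the bit positions are taken directly from those macros, while struct tls_fields and decode_tls_desc() are made-up names for this example, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only -- not kernel types. */
struct tls_fields {
	uint32_t base;			/* segment base address             */
	uint32_t limit;			/* limit, in bytes or 4 KiB pages   */
	unsigned int seg_32bit;		/* D/B flag, bit 22 of word 1       */
	unsigned int contents;		/* type bits 10-11 of word 1        */
	unsigned int writable;		/* bit 9 of word 1                  */
	unsigned int limit_in_pages;	/* granularity, bit 23 of word 1    */
	unsigned int present;		/* P flag, bit 15 of word 1         */
	unsigned int useable;		/* AVL, bit 20 of word 1            */
	unsigned int longmode;		/* L flag, bit 21 of word 1         */
};

static void decode_tls_desc(const uint32_t desc[2], struct tls_fields *f)
{
	/* limit bits 0-15 sit in word 0, limit bits 16-19 in bits 16-19 of word 1 */
	f->limit = (desc[0] & 0x0ffff) | (desc[1] & 0xf0000);
	/* base bits 0-15 in word 0 bits 16-31, base bits 16-31 split across word 1 */
	f->base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) |
		  (desc[1] & 0xff000000);
	f->seg_32bit      = (desc[1] >> 22) & 1;
	f->contents       = (desc[1] >> 10) & 3;
	f->writable       = (desc[1] >>  9) & 1;
	f->limit_in_pages = (desc[1] >> 23) & 1;
	f->present        = (desc[1] >> 15) & 1;
	f->useable        = (desc[1] >> 20) & 1;
	f->longmode       = (desc[1] >> 21) & 1;
}

int main(void)
{
	/* Example entry: base 0x12345678, 4 GiB page-granular limit,
	 * present, writable, 32-bit data segment. */
	const uint32_t desc[2] = { 0x5678ffff, 0x12cff234 };
	struct tls_fields f;

	decode_tls_desc(desc, &f);
	printf("base=%#x limit=%#x 32bit=%u pages=%u present=%u writable=%u\n",
	       f.base, f.limit, f.seg_32bit, f.limit_in_pages, f.present,
	       f.writable);
	return 0;
}

Compiled and run, this prints base=0x12345678 limit=0xfffff 32bit=1 pages=1 present=1 writable=1 for the example entry.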
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 5291596f19b0..67a377621b12 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -19,31 +19,34 @@ static int get_free_idx(void)
 	int idx;
 
 	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-		if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+		if (desc_empty(&t->tls_array[idx]))
 			return idx + GDT_ENTRY_TLS_MIN;
 	return -ESRCH;
 }
 
 /*
  * Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
  */
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+int do_set_thread_area(struct task_struct *p, int idx,
+		       struct user_desc __user *u_info,
+		       int can_allocate)
 {
+	struct thread_struct *t = &p->thread;
 	struct user_desc info;
-	struct n_desc_struct *desc;
-	int cpu, idx;
+	u32 *desc;
+	int cpu;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
 
-	idx = info.entry_number;
+	if (idx == -1)
+		idx = info.entry_number;
 
 	/*
 	 * index -1 means the kernel should try to find and
 	 * allocate an empty descriptor:
 	 */
-	if (idx == -1) {
+	if (idx == -1 && can_allocate) {
 		idx = get_free_idx();
 		if (idx < 0)
 			return idx;
@@ -54,7 +57,7 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+	desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
 
 	/*
 	 * We must not get preempted while modifying the TLS.
@@ -62,11 +65,11 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	cpu = get_cpu();
 
 	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
+		desc[0] = 0;
+		desc[1] = 0;
 	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
+		desc[0] = LDT_entry_a(&info);
+		desc[1] = LDT_entry_b(&info);
 	}
 	if (t == &current->thread)
 		load_TLS(t, cpu);
@@ -75,9 +78,9 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	return 0;
 }
 
-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 {
-	return do_set_thread_area(&current->thread, u_info);
+	return do_set_thread_area(current, -1, u_info, 1);
 }
 
 
@@ -85,34 +88,32 @@ asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
  * Get the current Thread-Local Storage area:
  */
 
-#define GET_LIMIT(desc) ( \
-	((desc)->a & 0x0ffff) | \
-	((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+#define GET_LIMIT(desc) (((desc)[0] & 0x0ffff) | ((desc)[1] & 0xf0000))
+#define GET_32BIT(desc) (((desc)[1] >> 22) & 1)
+#define GET_CONTENTS(desc) (((desc)[1] >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)[1] >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)[1] >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)[1] >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)[1] >> 20) & 1)
+#define GET_LONGMODE(desc) (((desc)[1] >> 21) & 1)
+
+int do_get_thread_area(struct task_struct *p, int idx,
+		       struct user_desc __user *u_info)
 {
+	struct thread_struct *t = &p->thread;
 	struct user_desc info;
-	struct n_desc_struct *desc;
-	int idx;
+	u32 *desc;
 
-	if (get_user(idx, &u_info->entry_number))
+	if (idx == -1 && get_user(idx, &u_info->entry_number))
 		return -EFAULT;
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
-	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+	desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
 
 	memset(&info, 0, sizeof(struct user_desc));
 	info.entry_number = idx;
-	info.base_addr = get_desc_base(desc);
+	info.base_addr = get_desc_base((void *)desc);
 	info.limit = GET_LIMIT(desc);
 	info.seg_32bit = GET_32BIT(desc);
 	info.contents = GET_CONTENTS(desc);
@@ -120,39 +121,16 @@ int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
 	info.limit_in_pages = GET_LIMIT_PAGES(desc);
 	info.seg_not_present = !GET_PRESENT(desc);
 	info.useable = GET_USEABLE(desc);
+#ifdef CONFIG_X86_64
 	info.lm = GET_LONGMODE(desc);
+#endif
 
 	if (copy_to_user(u_info, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 {
-	return do_get_thread_area(&current->thread, u_info);
-}
-
-
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
-{
-	struct n_desc_struct *desc;
-	struct user_desc info;
-	struct user_desc __user *cp;
-	int idx;
-
-	cp = (void __user *)childregs->rsi;
-	if (copy_from_user(&info, cp, sizeof(info)))
-		return -EFAULT;
-	if (LDT_empty(&info))
-		return -EINVAL;
-
-	idx = info.entry_number;
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
-	desc->a = LDT_entry_a(&info);
-	desc->b = LDT_entry_b(&info);
-
-	return 0;
+	return do_get_thread_area(current, -1, u_info);
 }
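As a complement to the decoding sketch after the diffstat, the following hedged sketch reconstructs the packing that LDT_entry_a()/LDT_entry_b(), called in the diff above, presumably perform when turning a user_desc into the two descriptor words. The bit positions mirror the GET_*() macros earlier in the diff; struct tls_info, tls_entry_a()/tls_entry_b() and the hard-coded 0x7000 access bits (S=1, DPL=3) are assumptions made so this example stands alone, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the user_desc fields that matter here. */
struct tls_info {
	uint32_t base_addr;
	uint32_t limit;
	unsigned int seg_32bit;
	unsigned int contents;
	unsigned int read_exec_only;
	unsigned int limit_in_pages;
	unsigned int seg_not_present;
	unsigned int useable;
};

/* Pack the low descriptor word: limit bits 0-15 and base bits 0-15. */
static uint32_t tls_entry_a(const struct tls_info *info)
{
	return ((info->base_addr & 0x0000ffff) << 16) |
	       (info->limit & 0x0ffff);
}

/* Pack the high descriptor word; bit positions match the GET_*() macros. */
static uint32_t tls_entry_b(const struct tls_info *info)
{
	return (info->base_addr & 0xff000000) |		/* base 24-31      */
	       ((info->base_addr & 0x00ff0000) >> 16) |	/* base 16-23      */
	       (info->limit & 0xf0000) |		/* limit 16-19     */
	       ((info->read_exec_only ^ 1) << 9) |	/* writable bit    */
	       (info->contents << 10) |			/* type contents   */
	       ((info->seg_not_present ^ 1) << 15) |	/* present bit     */
	       (info->useable << 20) |			/* AVL bit         */
	       (info->seg_32bit << 22) |		/* D/B flag        */
	       (info->limit_in_pages << 23) |		/* granularity     */
	       0x7000;					/* S=1, DPL=3 (assumed) */
}

int main(void)
{
	struct tls_info info = {
		.base_addr = 0x12345678, .limit = 0xfffff,
		.seg_32bit = 1, .limit_in_pages = 1,
	};

	printf("desc[0]=%#010x desc[1]=%#010x\n",
	       tls_entry_a(&info), tls_entry_b(&info));
	return 0;
}

For the sample fields it prints desc[0]=0x5678ffff desc[1]=0x12cff234, which decodes back to the same example values used in the earlier sketch.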