Diffstat (limited to 'arch/x86/kernel/ldt_64.c')
-rw-r--r--  arch/x86/kernel/ldt_64.c  |  250
1 file changed, 0 insertions, 250 deletions
diff --git a/arch/x86/kernel/ldt_64.c b/arch/x86/kernel/ldt_64.c
deleted file mode 100644
index 60e57abb8e90..000000000000
--- a/arch/x86/kernel/ldt_64.c
+++ /dev/null
@@ -1,250 +0,0 @@
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>

#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *null)
{
	if (current->active_mm)
		load_LDT(&current->active_mm->context);
}
#endif

static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
{
	void *oldldt;
	void *newldt;
	unsigned oldsize;

	if (mincount <= (unsigned)pc->size)
		return 0;
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
	else
		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
	wmb();
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();
	if (reload) {
#ifdef CONFIG_SMP
		cpumask_t mask;

		preempt_disable();
		mask = cpumask_of_cpu(smp_processor_id());
		load_LDT(pc);
		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
			smp_call_function(flush_ldt, NULL, 1, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	if (oldsize) {
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}

static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	if (err < 0)
		return err;
	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
	return 0;
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct * old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	mm->context.size = 0;
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		mutex_lock(&old_mm->context.lock);
		retval = copy_ldt(&mm->context, &old_mm->context);
		mutex_unlock(&old_mm->context.lock);
	}
	return retval;
}

/*
 *
 * Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.size) {
		if ((unsigned)mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(mm->context.ldt);
		else
			kfree(mm->context.ldt);
		mm->context.size = 0;
	}
}

static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	struct mm_struct * mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

	mutex_lock(&mm->context.lock);
	size = mm->context.size*LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	if (copy_to_user(ptr, mm->context.ldt, size))
		err = -EFAULT;
	mutex_unlock(&mm->context.lock);
	if (err < 0)
		goto error_return;
	if (size != bytecount) {
		/* zero-fill the rest */
		if (clear_user(ptr+size, bytecount-size) != 0) {
			err = -EFAULT;
			goto error_return;
		}
	}
	return bytecount;
error_return:
	return err;
}

static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	/* Arbitrary number */
	/* x86-64 default LDT is all zeros */
	if (bytecount > 128)
		bytecount = 128;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
{
	struct task_struct *me = current;
	struct mm_struct * mm = me->mm;
	__u32 entry_1, entry_2, *lp;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;

	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, bytecount))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&mm->context.lock);
	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
		if (error < 0)
			goto out_unlock;
	}

	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			entry_1 = 0;
			entry_2 = 0;
			goto install;
		}
	}

	entry_1 = LDT_entry_a(&ldt_info);
	entry_2 = LDT_entry_b(&ldt_info);
	if (oldmode)
		entry_2 &= ~(1 << 20);

	/* Install the new entry ... */
install:
	*lp = entry_1;
	*(lp+1) = entry_2;
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}

asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
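
For reference, the func values dispatched by sys_modify_ldt() in the deleted file map onto the userspace modify_ldt interface: 0 = read_ldt, 1 = write_ldt in old mode, 2 = read_default_ldt, 0x11 = write_ldt in new mode. The following is a minimal userspace sketch of that calling convention, not part of this change; it assumes glibc's syscall() wrapper and struct user_desc from <asm/ldt.h>, and the entry number and segment values are purely illustrative.

/* Hypothetical userspace caller of modify_ldt(); all values are
 * illustrative only and do not come from the deleted file. */
#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>	/* SYS_modify_ldt */
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	char buf[2 * 8];	/* room for two 8-byte LDT entries */
	long n;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;	/* first LDT slot (illustrative) */
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;
	desc.useable        = 1;

	/* func 0x11: install an entry via the "new mode" path (oldmode == 0) */
	if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0) {
		perror("modify_ldt(0x11)");
		return 1;
	}

	/* func 0: read the LDT back; the kernel copies up to bytecount bytes
	 * and zero-fills anything past the allocated table. */
	n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("modify_ldt read returned %ld bytes\n", n);
	return 0;
}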