 arch/x86/include/asm/desc.h        |  15
 arch/x86/include/asm/mmu.h         |   3
 arch/x86/include/asm/mmu_context.h |  54
 arch/x86/kernel/cpu/common.c       |   4
 arch/x86/kernel/cpu/perf_event.c   |  12
 arch/x86/kernel/ldt.c              | 262
 arch/x86/kernel/process_64.c       |   4
 arch/x86/kernel/step.c             |   6
 arch/x86/power/cpu.c               |   3
 9 files changed, 210 insertions(+), 153 deletions(-)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
 	set_ldt(NULL, 0);
 }
 
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
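
The get_desc_base() helper retained in the context above splices a descriptor's 32-bit base back together from its three split fields. A minimal standalone sketch of that bit assembly (not part of the patch; the field values are made up for illustration):

/* Standalone illustration of the base0/base1/base2 reassembly. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base0 = 0x2000;	/* bits 0..15  of the base */
	uint32_t base1 = 0x34;		/* bits 16..23 of the base */
	uint32_t base2 = 0x12;		/* bits 24..31 of the base */
	uint32_t base  = base0 | (base1 << 16) | (base2 << 24);

	assert(base == 0x12342000);
	return 0;
}
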
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;
 
 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 804a3a6030ca..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif
 
 /*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions. This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates. On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* lockless_dereference synchronizes with smp_store_release */
+	ldt = lockless_dereference(mm->context.ldt);
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active. The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs. This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
  * Used for LDT copy/destruction.
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * was called and then modify_ldt changed
 		 * prev->context.ldt but suppressed an IPI to this CPU.
 		 * In this case, prev->context.ldt != NULL, because we
-		 * never free an LDT while the mm still exists. That
-		 * means that next->context.ldt != prev->context.ldt,
-		 * because mms never share an LDT.
+		 * never set context.ldt to NULL while the mm still
+		 * exists. That means that next->context.ldt !=
+		 * prev->context.ldt, because mms never share an LDT.
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			load_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 			load_mm_cr4(next);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 		}
 	}
 #endif
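
The new load_mm_ldt() above depends on lockless_dereference() pairing with the smp_store_release() introduced in ldt.c further down: the writer fully builds the new table, publishes the pointer with release semantics, and readers see either the old or the new table, never a partially initialized one. A minimal standalone sketch of that publish/consume pattern in C11 atomics (hypothetical names, not kernel code):

#include <stdatomic.h>
#include <stddef.h>

struct ldt_image {			/* hypothetical stand-in for struct ldt_struct */
	void *entries;
	int size;
};

static _Atomic(struct ldt_image *) published_ldt;

/* Writer: initialize the object completely, then publish it with release
 * semantics so a reader never observes a half-built structure. */
static void publish(struct ldt_image *new_ldt)
{
	atomic_store_explicit(&published_ldt, new_ldt, memory_order_release);
}

/* Reader: an acquire load (standing in for the kernel's consume-style
 * lockless_dereference) pairs with the release store above. */
static struct ldt_image *snapshot(void)
{
	return atomic_load_explicit(&published_ldt, memory_order_acquire);
}
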
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
 	load_sp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
 	load_sp0(t, thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
 	int idx = segment >> 3;
 
 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		struct ldt_struct *ldt;
+
 		if (idx > LDT_ENTRIES)
 			return 0;
 
-		if (idx > current->active_mm->context.size)
+		/* IRQs are off, so this synchronizes with smp_store_release */
+		ldt = lockless_dereference(current->active_mm->context.ldt);
+		if (!ldt || idx > ldt->size)
 			return 0;
 
-		desc = current->active_mm->context.ldt;
+		desc = &ldt->entries[idx];
 	} else {
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = raw_cpu_ptr(gdt_page.gdt);
+		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
 	}
 
-	return get_desc_base(desc + idx);
+	return get_desc_base(desc);
 }
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
@@ -20,82 +21,82 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *current_mm)
 {
-	if (current->active_mm == current_mm)
-		load_LDT(&current->active_mm->context);
+	mm_context_t *pc;
+
+	if (current->active_mm != current_mm)
+		return;
+
+	pc = &current->active_mm->context;
+	set_ldt(pc->ldt->entries, pc->ldt->size);
 }
-#endif
 
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
 {
-	void *oldldt, *newldt;
-	int oldsize;
-
-	if (mincount <= pc->size)
-		return 0;
-	oldsize = pc->size;
-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+	struct ldt_struct *new_ldt;
+	int alloc_size;
+
+	if (size > LDT_ENTRIES)
+		return NULL;
+
+	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+	if (!new_ldt)
+		return NULL;
+
+	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+	alloc_size = size * LDT_ENTRY_SIZE;
+
+	/*
+	 * Xen is very picky: it requires a page-aligned LDT that has no
+	 * trailing nonzero bytes in any page that contains LDT descriptors.
+	 * Keep it simple: zero the whole allocation and never allocate less
+	 * than PAGE_SIZE.
+	 */
+	if (alloc_size > PAGE_SIZE)
+		new_ldt->entries = vzalloc(alloc_size);
 	else
-		newldt = (void *)__get_free_page(GFP_KERNEL);
-
-	if (!newldt)
-		return -ENOMEM;
+		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
 
-	if (oldsize)
-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-	oldldt = pc->ldt;
-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
+	if (!new_ldt->entries) {
+		kfree(new_ldt);
+		return NULL;
+	}
 
-	paravirt_alloc_ldt(newldt, mincount);
+	new_ldt->size = size;
+	return new_ldt;
+}
 
-#ifdef CONFIG_X86_64
-	/* CHECKME: Do we really need this ? */
-	wmb();
-#endif
-	pc->ldt = newldt;
-	wmb();
-	pc->size = mincount;
-	wmb();
-
-	if (reload) {
-#ifdef CONFIG_SMP
-		preempt_disable();
-		load_LDT(pc);
-		if (!cpumask_equal(mm_cpumask(current->mm),
-				   cpumask_of(smp_processor_id())))
-			smp_call_function(flush_ldt, current->mm, 1);
-		preempt_enable();
-#else
-		load_LDT(pc);
-#endif
-	}
-	if (oldsize) {
-		paravirt_free_ldt(oldldt, oldsize);
-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(oldldt);
-		else
-			put_page(virt_to_page(oldldt));
-	}
-	return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+	paravirt_alloc_ldt(ldt->entries, ldt->size);
 }
 
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+			struct ldt_struct *ldt)
 {
-	int err = alloc_ldt(new, old->size, 0);
-	int i;
+	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	smp_store_release(&current_mm->context.ldt, ldt);
+
+	/* Activate the LDT for all CPUs using current_mm. */
+	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
 
-	if (err < 0)
-		return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+	if (likely(!ldt))
+		return;
 
-	for (i = 0; i < old->size; i++)
-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-	return 0;
+	paravirt_free_ldt(ldt->entries, ldt->size);
+	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(ldt->entries);
+	else
+		kfree(ldt->entries);
+	kfree(ldt);
 }
 
 /*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	struct ldt_struct *new_ldt;
 	struct mm_struct *old_mm;
 	int retval = 0;
 
 	mutex_init(&mm->context.lock);
-	mm->context.size = 0;
 	old_mm = current->mm;
-	if (old_mm && old_mm->context.size > 0) {
-		mutex_lock(&old_mm->context.lock);
-		retval = copy_ldt(&mm->context, &old_mm->context);
-		mutex_unlock(&old_mm->context.lock);
+	if (!old_mm) {
+		mm->context.ldt = NULL;
+		return 0;
 	}
+
+	mutex_lock(&old_mm->context.lock);
+	if (!old_mm->context.ldt) {
+		mm->context.ldt = NULL;
+		goto out_unlock;
+	}
+
+	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+	if (!new_ldt) {
+		retval = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+	       new_ldt->size * LDT_ENTRY_SIZE);
+	finalize_ldt_struct(new_ldt);
+
+	mm->context.ldt = new_ldt;
+
+out_unlock:
+	mutex_unlock(&old_mm->context.lock);
 	return retval;
 }
 
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  */
 void destroy_context(struct mm_struct *mm)
 {
-	if (mm->context.size) {
-#ifdef CONFIG_X86_32
-		/* CHECKME: Can this ever happen ? */
-		if (mm == current->active_mm)
-			clear_LDT();
-#endif
-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(mm->context.ldt);
-		else
-			put_page(virt_to_page(mm->context.ldt));
-		mm->context.size = 0;
-	}
+	free_ldt_struct(mm->context.ldt);
+	mm->context.ldt = NULL;
 }
 
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-	int err;
+	int retval;
 	unsigned long size;
 	struct mm_struct *mm = current->mm;
 
-	if (!mm->context.size)
-		return 0;
+	mutex_lock(&mm->context.lock);
+
+	if (!mm->context.ldt) {
+		retval = 0;
+		goto out_unlock;
+	}
+
 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-	mutex_lock(&mm->context.lock);
-	size = mm->context.size * LDT_ENTRY_SIZE;
+	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
 	if (size > bytecount)
 		size = bytecount;
 
-	err = 0;
-	if (copy_to_user(ptr, mm->context.ldt, size))
-		err = -EFAULT;
-	mutex_unlock(&mm->context.lock);
-	if (err < 0)
-		goto error_return;
+	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+		retval = -EFAULT;
+		goto out_unlock;
+	}
+
 	if (size != bytecount) {
-		/* zero-fill the rest */
-		if (clear_user(ptr + size, bytecount - size) != 0) {
-			err = -EFAULT;
-			goto error_return;
+		/* Zero-fill the rest and pretend we read bytecount bytes. */
+		if (clear_user(ptr + size, bytecount - size)) {
+			retval = -EFAULT;
+			goto out_unlock;
 		}
 	}
-	return bytecount;
-error_return:
-	return err;
+	retval = bytecount;
+
+out_unlock:
+	mutex_unlock(&mm->context.lock);
+	return retval;
 }
 
 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	struct desc_struct ldt;
 	int error;
 	struct user_desc ldt_info;
+	int oldsize, newsize;
+	struct ldt_struct *new_ldt, *old_ldt;
 
 	error = -EINVAL;
 	if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		goto out;
 	}
 
-	mutex_lock(&mm->context.lock);
-	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context,
-				  ldt_info.entry_number + 1, 1);
-		if (error < 0)
-			goto out_unlock;
-	}
-
-	/* Allow LDTs to be cleared by the user. */
-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-		if (oldmode || LDT_empty(&ldt_info)) {
-			memset(&ldt, 0, sizeof(ldt));
-			goto install;
+	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+	    LDT_empty(&ldt_info)) {
+		/* The user wants to clear the entry. */
+		memset(&ldt, 0, sizeof(ldt));
+	} else {
+		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+			error = -EINVAL;
+			goto out;
 		}
+
+		fill_ldt(&ldt, &ldt_info);
+		if (oldmode)
+			ldt.avl = 0;
 	}
 
-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
-		error = -EINVAL;
+	mutex_lock(&mm->context.lock);
+
+	old_ldt = mm->context.ldt;
+	oldsize = old_ldt ? old_ldt->size : 0;
+	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+	error = -ENOMEM;
+	new_ldt = alloc_ldt_struct(newsize);
+	if (!new_ldt)
 		goto out_unlock;
-	}
 
-	fill_ldt(&ldt, &ldt_info);
-	if (oldmode)
-		ldt.avl = 0;
+	if (old_ldt)
+		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+	new_ldt->entries[ldt_info.entry_number] = ldt;
+	finalize_ldt_struct(new_ldt);
 
-	/* Install the new entry ... */
-install:
-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+	install_ldt(mm, new_ldt);
+	free_ldt_struct(old_ldt);
 	error = 0;
 
 out_unlock:
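
The write_ldt() path above is reached from userspace through the modify_ldt(2) syscall: each write now allocates a fresh ldt_struct, copies the old entries, patches one slot, and publishes the result via install_ldt(). For context, a rough userspace sketch of such a caller (field values are illustrative only; error handling omitted):

#include <asm/ldt.h>		/* struct user_desc */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Install a 32-bit data segment in LDT slot 0. */
static long install_example_entry(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;
	d.base_addr      = 0;		/* illustrative */
	d.limit          = 0xfffff;
	d.seg_32bit      = 1;
	d.limit_in_pages = 1;

	/* func == 1: write an LDT entry (handled by write_ldt() above). */
	return syscall(SYS_modify_ldt, 1, &d, sizeof(d));
}
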
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
 void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
-		if (dead_task->mm->context.size) {
+		if (dead_task->mm->context.ldt) {
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
 				dead_task->mm->context.ldt,
-				dead_task->mm->context.size);
+				dead_task->mm->context.ldt->size);
 			BUG();
 		}
 	}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..6273324186ac 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/ptrace.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {
@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 		seg &= ~7UL;
 
 		mutex_lock(&child->mm->context.lock);
-		if (unlikely((seg >> 3) >= child->mm->context.size))
+		if (unlikely(!child->mm->context.ldt ||
+			     (seg >> 3) >= child->mm->context.ldt->size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
-			desc = child->mm->context.ldt + seg;
+			desc = &child->mm->context.ldt->entries[seg];
 			base = get_desc_base(desc);
 
 			/* 16-bit code segment? */
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
 	syscall_init();				/* This sets MSR_*STAR and related */
 #endif
 	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
+	load_mm_ldt(current->active_mm);	/* This does lldt */
 
 	fpu__resume_cpu();
 }