about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorBorislav Petkov <bp@suse.de>2017-06-06 13:31:16 -0400
committerIngo Molnar <mingo@kernel.org>2017-06-08 03:28:21 -0400
commitbbf79d21bd4627a01ca8721c9373752819f8e4cc (patch)
treed7f3bda1eb4d68c9b458452b072a493a8fd62767
parent5dd0b16cdaff9b94da06074d5888b03235c0bf17 (diff)
x86/ldt: Rename ldt_struct::size to ::nr_entries
... because this is exactly what it is: the number of entries in the
LDT. Calling it "size" is simply confusing and it is actually begging
to be called "nr_entries" or somesuch, especially if you see constructs
like:

	alloc_size = size * LDT_ENTRY_SIZE;

since LDT_ENTRY_SIZE is the size of a single entry.

There should be no functionality change resulting from this patch, as
the before/after output from tools/testing/selftests/x86/ldt_gdt.c
shows.

Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170606173116.13977-1-bp@alien8.de
[ Renamed 'n_entries' to 'nr_entries' ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/events/core.c	2
-rw-r--r--	arch/x86/include/asm/mmu_context.h	4
-rw-r--r--	arch/x86/kernel/ldt.c	49
-rw-r--r--	arch/x86/kernel/process_64.c	2
-rw-r--r--	arch/x86/kernel/step.c	2
-rw-r--r--	arch/x86/math-emu/fpu_system.h	2
6 files changed, 31 insertions, 30 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 77a33096728d..d4d5e1ee8e9c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2333,7 +2333,7 @@ static unsigned long get_segment_base(unsigned int segment)
2333 2333
2334 /* IRQs are off, so this synchronizes with smp_store_release */ 2334 /* IRQs are off, so this synchronizes with smp_store_release */
2335 ldt = lockless_dereference(current->active_mm->context.ldt); 2335 ldt = lockless_dereference(current->active_mm->context.ldt);
2336 if (!ldt || idx > ldt->size) 2336 if (!ldt || idx > ldt->nr_entries)
2337 return 0; 2337 return 0;
2338 2338
2339 desc = &ldt->entries[idx]; 2339 desc = &ldt->entries[idx];
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f20d7ea47095..5a93f6261302 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -47,7 +47,7 @@ struct ldt_struct {
47 * allocations, but it's not worth trying to optimize. 47 * allocations, but it's not worth trying to optimize.
48 */ 48 */
49 struct desc_struct *entries; 49 struct desc_struct *entries;
50 unsigned int size; 50 unsigned int nr_entries;
51}; 51};
52 52
53/* 53/*
@@ -87,7 +87,7 @@ static inline void load_mm_ldt(struct mm_struct *mm)
87 */ 87 */
88 88
89 if (unlikely(ldt)) 89 if (unlikely(ldt))
90 set_ldt(ldt->entries, ldt->size); 90 set_ldt(ldt->entries, ldt->nr_entries);
91 else 91 else
92 clear_LDT(); 92 clear_LDT();
93#else 93#else
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index de503e7a64ad..a870910c8565 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -31,16 +31,16 @@ static void flush_ldt(void *__mm)
31 return; 31 return;
32 32
33 pc = &mm->context; 33 pc = &mm->context;
34 set_ldt(pc->ldt->entries, pc->ldt->size); 34 set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
35} 35}
36 36
37/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ 37/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
38static struct ldt_struct *alloc_ldt_struct(unsigned int size) 38static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
39{ 39{
40 struct ldt_struct *new_ldt; 40 struct ldt_struct *new_ldt;
41 unsigned int alloc_size; 41 unsigned int alloc_size;
42 42
43 if (size > LDT_ENTRIES) 43 if (num_entries > LDT_ENTRIES)
44 return NULL; 44 return NULL;
45 45
46 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); 46 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
@@ -48,7 +48,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
48 return NULL; 48 return NULL;
49 49
50 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); 50 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
51 alloc_size = size * LDT_ENTRY_SIZE; 51 alloc_size = num_entries * LDT_ENTRY_SIZE;
52 52
53 /* 53 /*
54 * Xen is very picky: it requires a page-aligned LDT that has no 54 * Xen is very picky: it requires a page-aligned LDT that has no
@@ -66,14 +66,14 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
66 return NULL; 66 return NULL;
67 } 67 }
68 68
69 new_ldt->size = size; 69 new_ldt->nr_entries = num_entries;
70 return new_ldt; 70 return new_ldt;
71} 71}
72 72
73/* After calling this, the LDT is immutable. */ 73/* After calling this, the LDT is immutable. */
74static void finalize_ldt_struct(struct ldt_struct *ldt) 74static void finalize_ldt_struct(struct ldt_struct *ldt)
75{ 75{
76 paravirt_alloc_ldt(ldt->entries, ldt->size); 76 paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
77} 77}
78 78
79/* context.lock is held */ 79/* context.lock is held */
@@ -92,8 +92,8 @@ static void free_ldt_struct(struct ldt_struct *ldt)
92 if (likely(!ldt)) 92 if (likely(!ldt))
93 return; 93 return;
94 94
95 paravirt_free_ldt(ldt->entries, ldt->size); 95 paravirt_free_ldt(ldt->entries, ldt->nr_entries);
96 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) 96 if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
97 vfree_atomic(ldt->entries); 97 vfree_atomic(ldt->entries);
98 else 98 else
99 free_page((unsigned long)ldt->entries); 99 free_page((unsigned long)ldt->entries);
@@ -123,14 +123,14 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
123 goto out_unlock; 123 goto out_unlock;
124 } 124 }
125 125
126 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); 126 new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
127 if (!new_ldt) { 127 if (!new_ldt) {
128 retval = -ENOMEM; 128 retval = -ENOMEM;
129 goto out_unlock; 129 goto out_unlock;
130 } 130 }
131 131
132 memcpy(new_ldt->entries, old_mm->context.ldt->entries, 132 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
133 new_ldt->size * LDT_ENTRY_SIZE); 133 new_ldt->nr_entries * LDT_ENTRY_SIZE);
134 finalize_ldt_struct(new_ldt); 134 finalize_ldt_struct(new_ldt);
135 135
136 mm->context.ldt = new_ldt; 136 mm->context.ldt = new_ldt;
@@ -153,9 +153,9 @@ void destroy_context_ldt(struct mm_struct *mm)
153 153
154static int read_ldt(void __user *ptr, unsigned long bytecount) 154static int read_ldt(void __user *ptr, unsigned long bytecount)
155{ 155{
156 int retval;
157 unsigned long size;
158 struct mm_struct *mm = current->mm; 156 struct mm_struct *mm = current->mm;
157 unsigned long entries_size;
158 int retval;
159 159
160 mutex_lock(&mm->context.lock); 160 mutex_lock(&mm->context.lock);
161 161
@@ -167,18 +167,18 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
167 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 167 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
168 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 168 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
169 169
170 size = mm->context.ldt->size * LDT_ENTRY_SIZE; 170 entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
171 if (size > bytecount) 171 if (entries_size > bytecount)
172 size = bytecount; 172 entries_size = bytecount;
173 173
174 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { 174 if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
175 retval = -EFAULT; 175 retval = -EFAULT;
176 goto out_unlock; 176 goto out_unlock;
177 } 177 }
178 178
179 if (size != bytecount) { 179 if (entries_size != bytecount) {
180 /* Zero-fill the rest and pretend we read bytecount bytes. */ 180 /* Zero-fill the rest and pretend we read bytecount bytes. */
181 if (clear_user(ptr + size, bytecount - size)) { 181 if (clear_user(ptr + entries_size, bytecount - entries_size)) {
182 retval = -EFAULT; 182 retval = -EFAULT;
183 goto out_unlock; 183 goto out_unlock;
184 } 184 }
@@ -209,7 +209,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
209{ 209{
210 struct mm_struct *mm = current->mm; 210 struct mm_struct *mm = current->mm;
211 struct ldt_struct *new_ldt, *old_ldt; 211 struct ldt_struct *new_ldt, *old_ldt;
212 unsigned int oldsize, newsize; 212 unsigned int old_nr_entries, new_nr_entries;
213 struct user_desc ldt_info; 213 struct user_desc ldt_info;
214 struct desc_struct ldt; 214 struct desc_struct ldt;
215 int error; 215 int error;
@@ -248,17 +248,18 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
248 248
249 mutex_lock(&mm->context.lock); 249 mutex_lock(&mm->context.lock);
250 250
251 old_ldt = mm->context.ldt; 251 old_ldt = mm->context.ldt;
252 oldsize = old_ldt ? old_ldt->size : 0; 252 old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
253 newsize = max(ldt_info.entry_number + 1, oldsize); 253 new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
254 254
255 error = -ENOMEM; 255 error = -ENOMEM;
256 new_ldt = alloc_ldt_struct(newsize); 256 new_ldt = alloc_ldt_struct(new_nr_entries);
257 if (!new_ldt) 257 if (!new_ldt)
258 goto out_unlock; 258 goto out_unlock;
259 259
260 if (old_ldt) 260 if (old_ldt)
261 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); 261 memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
262
262 new_ldt->entries[ldt_info.entry_number] = ldt; 263 new_ldt->entries[ldt_info.entry_number] = ldt;
263 finalize_ldt_struct(new_ldt); 264 finalize_ldt_struct(new_ldt);
264 265
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b6840bf3940b..9c39ab8bcc41 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -142,7 +142,7 @@ void release_thread(struct task_struct *dead_task)
142 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 142 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
143 dead_task->comm, 143 dead_task->comm,
144 dead_task->mm->context.ldt->entries, 144 dead_task->mm->context.ldt->entries,
145 dead_task->mm->context.ldt->size); 145 dead_task->mm->context.ldt->nr_entries);
146 BUG(); 146 BUG();
147 } 147 }
148#endif 148#endif
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index f07f83b3611b..5f25cfbd952e 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -34,7 +34,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
34 34
35 mutex_lock(&child->mm->context.lock); 35 mutex_lock(&child->mm->context.lock);
36 if (unlikely(!child->mm->context.ldt || 36 if (unlikely(!child->mm->context.ldt ||
37 seg >= child->mm->context.ldt->size)) 37 seg >= child->mm->context.ldt->nr_entries))
38 addr = -1L; /* bogus selector, access would fault */ 38 addr = -1L; /* bogus selector, access would fault */
39 else { 39 else {
40 desc = &child->mm->context.ldt->entries[seg]; 40 desc = &child->mm->context.ldt->entries[seg];
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 5e044d506b7a..a179254a5122 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -27,7 +27,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
27#ifdef CONFIG_MODIFY_LDT_SYSCALL 27#ifdef CONFIG_MODIFY_LDT_SYSCALL
28 seg >>= 3; 28 seg >>= 3;
29 mutex_lock(&current->mm->context.lock); 29 mutex_lock(&current->mm->context.lock);
30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size) 30 if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
31 ret = current->mm->context.ldt->entries[seg]; 31 ret = current->mm->context.ldt->entries[seg];
32 mutex_unlock(&current->mm->context.lock); 32 mutex_unlock(&current->mm->context.lock);
33#endif 33#endif