about summary refs log tree commit diff stats
path: root/arch/x86/boot
diff options
context:
space:
mode:
authorKees Cook <keescook@chromium.org>2016-05-25 18:45:33 -0400
committerIngo Molnar <mingo@kernel.org>2016-06-26 06:32:05 -0400
commited9f007ee68478f6a50ec9971ade25a0129a5c0e (patch)
treec5a1d1d15737ce5848d6c3bdef552004e0536774 /arch/x86/boot
parent8391c73c96f28d4e8c40fd401fd0c9c04391b44a (diff)
x86/KASLR: Extend kernel image physical address randomization to addresses larger than 4G
We want the physical address to be randomized anywhere between 16MB and the top of physical memory (up to 64TB). This patch exchanges the prior slots[] array for the new slot_areas[] array, and lifts the limitation of KERNEL_IMAGE_SIZE on the physical address offset for 64-bit. As before, process_e820_entry() walks memory and populates slot_areas[], splitting on any detected mem_avoid collisions. Finally, since the slots[] array and its associated functions are not needed any more, they are removed. Based on earlier patches by Baoquan He. Originally-from: Baoquan He <bhe@redhat.com> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Baoquan He <bhe@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: H.J. Lu <hjl.tools@gmail.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Yinghai Lu <yinghai@kernel.org> Link: http://lkml.kernel.org/r/1464216334-17200-5-git-send-email-keescook@chromium.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/boot')
-rw-r--r--arch/x86/boot/compressed/kaslr.c115
1 file changed, 69 insertions, 46 deletions
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 5550546916be..36e28112523a 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -132,17 +132,6 @@ enum mem_avoid_index {
132 132
133static struct mem_vector mem_avoid[MEM_AVOID_MAX]; 133static struct mem_vector mem_avoid[MEM_AVOID_MAX];
134 134
135static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
136{
137 /* Item at least partially before region. */
138 if (item->start < region->start)
139 return false;
140 /* Item at least partially after region. */
141 if (item->start + item->size > region->start + region->size)
142 return false;
143 return true;
144}
145
146static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) 135static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
147{ 136{
148 /* Item one is entirely before item two. */ 137 /* Item one is entirely before item two. */
@@ -319,8 +308,6 @@ static bool mem_avoid_overlap(struct mem_vector *img,
319 return is_overlapping; 308 return is_overlapping;
320} 309}
321 310
322static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];
323
324struct slot_area { 311struct slot_area {
325 unsigned long addr; 312 unsigned long addr;
326 int num; 313 int num;
@@ -351,36 +338,44 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
351 } 338 }
352} 339}
353 340
354static void slots_append(unsigned long addr)
355{
356 /* Overflowing the slots list should be impossible. */
357 if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
358 return;
359
360 slots[slot_max++] = addr;
361}
362
363static unsigned long slots_fetch_random(void) 341static unsigned long slots_fetch_random(void)
364{ 342{
343 unsigned long slot;
344 int i;
345
365 /* Handle case of no slots stored. */ 346 /* Handle case of no slots stored. */
366 if (slot_max == 0) 347 if (slot_max == 0)
367 return 0; 348 return 0;
368 349
369 return slots[get_random_long("Physical") % slot_max]; 350 slot = get_random_long("Physical") % slot_max;
351
352 for (i = 0; i < slot_area_index; i++) {
353 if (slot >= slot_areas[i].num) {
354 slot -= slot_areas[i].num;
355 continue;
356 }
357 return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
358 }
359
360 if (i == slot_area_index)
361 debug_putstr("slots_fetch_random() failed!?\n");
362 return 0;
370} 363}
371 364
372static void process_e820_entry(struct e820entry *entry, 365static void process_e820_entry(struct e820entry *entry,
373 unsigned long minimum, 366 unsigned long minimum,
374 unsigned long image_size) 367 unsigned long image_size)
375{ 368{
376 struct mem_vector region, img, overlap; 369 struct mem_vector region, overlap;
370 struct slot_area slot_area;
371 unsigned long start_orig;
377 372
378 /* Skip non-RAM entries. */ 373 /* Skip non-RAM entries. */
379 if (entry->type != E820_RAM) 374 if (entry->type != E820_RAM)
380 return; 375 return;
381 376
382 /* Ignore entries entirely above our maximum. */ 377 /* On 32-bit, ignore entries entirely above our maximum. */
383 if (entry->addr >= KERNEL_IMAGE_SIZE) 378 if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
384 return; 379 return;
385 380
386 /* Ignore entries entirely below our minimum. */ 381 /* Ignore entries entirely below our minimum. */
@@ -390,31 +385,55 @@ static void process_e820_entry(struct e820entry *entry,
390 region.start = entry->addr; 385 region.start = entry->addr;
391 region.size = entry->size; 386 region.size = entry->size;
392 387
393 /* Potentially raise address to minimum location. */ 388 /* Give up if slot area array is full. */
394 if (region.start < minimum) 389 while (slot_area_index < MAX_SLOT_AREA) {
395 region.start = minimum; 390 start_orig = region.start;
396 391
397 /* Potentially raise address to meet alignment requirements. */ 392 /* Potentially raise address to minimum location. */
398 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); 393 if (region.start < minimum)
394 region.start = minimum;
399 395
400 /* Did we raise the address above the bounds of this e820 region? */ 396 /* Potentially raise address to meet alignment needs. */
401 if (region.start > entry->addr + entry->size) 397 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
402 return;
403 398
404 /* Reduce size by any delta from the original address. */ 399 /* Did we raise the address above this e820 region? */
405 region.size -= region.start - entry->addr; 400 if (region.start > entry->addr + entry->size)
401 return;
406 402
407 /* Reduce maximum size to fit end of image within maximum limit. */ 403 /* Reduce size by any delta from the original address. */
408 if (region.start + region.size > KERNEL_IMAGE_SIZE) 404 region.size -= region.start - start_orig;
409 region.size = KERNEL_IMAGE_SIZE - region.start;
410 405
411 /* Walk each aligned slot and check for avoided areas. */ 406 /* On 32-bit, reduce region size to fit within max size. */
412 for (img.start = region.start, img.size = image_size ; 407 if (IS_ENABLED(CONFIG_X86_32) &&
413 mem_contains(&region, &img) ; 408 region.start + region.size > KERNEL_IMAGE_SIZE)
414 img.start += CONFIG_PHYSICAL_ALIGN) { 409 region.size = KERNEL_IMAGE_SIZE - region.start;
415 if (mem_avoid_overlap(&img, &overlap)) 410
416 continue; 411 /* Return if region can't contain decompressed kernel */
417 slots_append(img.start); 412 if (region.size < image_size)
413 return;
414
415 /* If nothing overlaps, store the region and return. */
416 if (!mem_avoid_overlap(&region, &overlap)) {
417 store_slot_info(&region, image_size);
418 return;
419 }
420
421 /* Store beginning of region if holds at least image_size. */
422 if (overlap.start > region.start + image_size) {
423 struct mem_vector beginning;
424
425 beginning.start = region.start;
426 beginning.size = overlap.start - region.start;
427 store_slot_info(&beginning, image_size);
428 }
429
430 /* Return if overlap extends to or past end of region. */
431 if (overlap.start + overlap.size >= region.start + region.size)
432 return;
433
434 /* Clip off the overlapping region and start over. */
435 region.size -= overlap.start - region.start + overlap.size;
436 region.start = overlap.start + overlap.size;
418 } 437 }
419} 438}
420 439
@@ -431,6 +450,10 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
431 for (i = 0; i < boot_params->e820_entries; i++) { 450 for (i = 0; i < boot_params->e820_entries; i++) {
432 process_e820_entry(&boot_params->e820_map[i], minimum, 451 process_e820_entry(&boot_params->e820_map[i], minimum,
433 image_size); 452 image_size);
453 if (slot_area_index == MAX_SLOT_AREA) {
454 debug_putstr("Aborted e820 scan (slot_areas full)!\n");
455 break;
456 }
434 } 457 }
435 458
436 return slots_fetch_random(); 459 return slots_fetch_random();