author		AKASHI Takahiro <takahiro.akashi@linaro.org>	2018-04-13 18:35:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-13 20:10:27 -0400
commit		cbe6601617302b0998d7f6779d04a222fc3a819b (patch)
tree		0cc4eeb5223314a541f7bd755c5df45d7c2d11db
parent		9ec4ecef0af7790551109283ca039a7c52de343c (diff)
x86: kexec_file: purge system-ram walking from prepare_elf64_headers()
While prepare_elf64_headers() in x86 looks pretty generic for other
architectures' use, it contains some code which tries to list crash memory
regions by walking through system resources, which is not always
architecture agnostic.  To make this function more generic, the related
code should be purged.

In this patch, prepare_elf64_headers() simply scans the crash_mem buffer
passed in and adds all the listed regions to the ELF header as PT_LOAD
segments.  The walk_system_ram_res(prepare_elf64_ram_headers_callback)
call has therefore been moved forward, before prepare_elf64_headers(),
and the callback, prepare_elf64_ram_headers_callback(), is now
responsible only for filling up the crash_mem buffer.

Meanwhile, elf_header_exclude_ranges() used to be called on every
invocation of this callback, which was rather redundant; it is now
called only once, from prepare_elf_headers(), as well.

Link: http://lkml.kernel.org/r/20180306102303.9063-4-takahiro.akashi@linaro.org
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Acked-by: Dave Young <dyoung@redhat.com>
Tested-by: Dave Young <dyoung@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/x86/kernel/crash.c	121
1 file changed, 58 insertions(+), 63 deletions(-)
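For orientation before the patch body: the rework splits header generation
into two steps. The walk_system_ram_res() callback now only collects RAM
regions into a crash_mem array, and prepare_elf64_headers() later turns each
collected range into a PT_LOAD program header. Below is a minimal userspace
C sketch of those two steps, assuming a trimmed-down crash_mem layout;
MAX_RANGES and the addresses in main() are made-up illustrative values, not
kernel definitions.

#include <elf.h>
#include <stdio.h>

#define MAX_RANGES 16	/* assumed bound; the kernel sizes this dynamically */

struct crash_mem_range {
	unsigned long long start, end;
};

struct crash_mem {		/* trimmed-down stand-in, not the kernel struct */
	unsigned int nr_ranges;
	struct crash_mem_range ranges[MAX_RANGES];
};

/* Step 1: the walk callback only records a region now. */
static int record_range(struct crash_mem *cmem,
			unsigned long long start, unsigned long long end)
{
	if (cmem->nr_ranges >= MAX_RANGES)
		return -1;
	cmem->ranges[cmem->nr_ranges].start = start;
	cmem->ranges[cmem->nr_ranges].end = end;
	cmem->nr_ranges++;
	return 0;
}

/* Step 2: emit one PT_LOAD program header per collected range. */
static void emit_pt_load(Elf64_Ehdr *ehdr, Elf64_Phdr *phdr,
			 const struct crash_mem *cmem)
{
	unsigned int i;

	for (i = 0; i < cmem->nr_ranges; i++, phdr++) {
		unsigned long long mstart = cmem->ranges[i].start;
		unsigned long long mend = cmem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_offset = phdr->p_paddr = mstart;
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		ehdr->e_phnum++;
	}
}

int main(void)
{
	struct crash_mem cmem = { 0 };
	Elf64_Ehdr ehdr = { 0 };
	Elf64_Phdr phdrs[MAX_RANGES] = { { 0 } };

	/* two made-up regions standing in for walk_system_ram_res() */
	record_range(&cmem, 0x1000, 0x9ffff);
	record_range(&cmem, 0x100000, 0x7fffffff);

	emit_pt_load(&ehdr, phdrs, &cmem);
	printf("e_phnum=%u first sz=0x%llx\n", (unsigned)ehdr.e_phnum,
	       (unsigned long long)phdrs[0].p_filesz);
	return 0;
}

The point of the split is visible here: once the ranges sit in one array,
exclusion (crashkernel region, ELF header region, backup region) can be done
once on the array rather than once per walk callback.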
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 1f6680427ff0..4a9c4ebcc371 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -317,18 +317,11 @@ static int exclude_mem_range(struct crash_mem *mem,
  * Look for any unwanted ranges between mstart, mend and remove them. This
  * might lead to split and split ranges are put in ced->mem.ranges[] array
  */
-static int elf_header_exclude_ranges(struct crash_elf_data *ced,
-		unsigned long long mstart, unsigned long long mend)
+static int elf_header_exclude_ranges(struct crash_elf_data *ced)
 {
 	struct crash_mem *cmem = &ced->mem;
 	int ret = 0;
 
-	memset(cmem->ranges, 0, sizeof(cmem->ranges));
-
-	cmem->ranges[0].start = mstart;
-	cmem->ranges[0].end = mend;
-	cmem->nr_ranges = 1;
-
 	/* Exclude crashkernel region */
 	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
 	if (ret)
@@ -346,53 +339,13 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
 static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 {
 	struct crash_elf_data *ced = arg;
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long mstart, mend;
-	struct kimage *image = ced->image;
-	struct crash_mem *cmem;
-	int ret, i;
-
-	ehdr = ced->ehdr;
-
-	/* Exclude unwanted mem ranges */
-	ret = elf_header_exclude_ranges(ced, res->start, res->end);
-	if (ret)
-		return ret;
-
-	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
-	cmem = &ced->mem;
-
-	for (i = 0; i < cmem->nr_ranges; i++) {
-		mstart = cmem->ranges[i].start;
-		mend = cmem->ranges[i].end;
-
-		phdr = ced->bufp;
-		ced->bufp += sizeof(Elf64_Phdr);
+	struct crash_mem *cmem = &ced->mem;
 
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_offset = mstart;
+	cmem->ranges[cmem->nr_ranges].start = res->start;
+	cmem->ranges[cmem->nr_ranges].end = res->end;
+	cmem->nr_ranges++;
 
-		/*
-		 * If a range matches backup region, adjust offset to backup
-		 * segment.
-		 */
-		if (mstart == image->arch.backup_src_start &&
-		    (mend - mstart + 1) == image->arch.backup_src_sz)
-			phdr->p_offset = image->arch.backup_load_addr;
-
-		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
-		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
-		phdr->p_align = 0;
-		ehdr->e_phnum++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
-			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
-			ehdr->e_phnum, phdr->p_offset);
-	}
-
-	return ret;
+	return 0;
 }
 
 static int prepare_elf64_headers(struct crash_elf_data *ced,
@@ -402,9 +355,10 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 	Elf64_Phdr *phdr;
 	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
 	unsigned char *buf, *bufp;
-	unsigned int cpu;
+	unsigned int cpu, i;
 	unsigned long long notes_addr;
-	int ret;
+	struct crash_mem *cmem = &ced->mem;
+	unsigned long mstart, mend;
 
 	/* extra phdr for vmcoreinfo elf note */
 	nr_phdr = nr_cpus + 1;
@@ -473,13 +427,25 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 	(ehdr->e_phnum)++;
 #endif
 
-	/* Prepare PT_LOAD headers for system ram chunks. */
-	ced->ehdr = ehdr;
-	ced->bufp = bufp;
-	ret = walk_system_ram_res(0, -1, ced,
-			prepare_elf64_ram_headers_callback);
-	if (ret < 0)
-		return ret;
+	/* Go through all the ranges in cmem->ranges[] and prepare phdr */
+	for (i = 0; i < cmem->nr_ranges; i++) {
+		mstart = cmem->ranges[i].start;
+		mend = cmem->ranges[i].end;
+
+		phdr->p_type = PT_LOAD;
+		phdr->p_flags = PF_R|PF_W|PF_X;
+		phdr->p_offset = mstart;
+
+		phdr->p_paddr = mstart;
+		phdr->p_vaddr = (unsigned long long) __va(mstart);
+		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
+		phdr->p_align = 0;
+		ehdr->e_phnum++;
+		phdr++;
+		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
+			ehdr->e_phnum, phdr->p_offset);
+	}
 
 	*addr = buf;
 	*sz = elf_sz;
@@ -491,7 +457,9 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 		unsigned long *sz)
 {
 	struct crash_elf_data *ced;
-	int ret;
+	Elf64_Ehdr *ehdr;
+	Elf64_Phdr *phdr;
+	int ret, i;
 
 	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
 	if (!ced)
@@ -499,8 +467,35 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 
 	fill_up_crash_elf_data(ced, image);
 
+	ret = walk_system_ram_res(0, -1, ced,
+				  prepare_elf64_ram_headers_callback);
+	if (ret)
+		goto out;
+
+	/* Exclude unwanted mem ranges */
+	ret = elf_header_exclude_ranges(ced);
+	if (ret)
+		goto out;
+
 	/* By default prepare 64bit headers */
 	ret = prepare_elf64_headers(ced, addr, sz);
+	if (ret)
+		goto out;
+
+	/*
+	 * If a range matches backup region, adjust offset to backup
+	 * segment.
+	 */
+	ehdr = (Elf64_Ehdr *)*addr;
+	phdr = (Elf64_Phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
+		if (phdr->p_type == PT_LOAD &&
+		    phdr->p_paddr == image->arch.backup_src_start &&
+		    phdr->p_memsz == image->arch.backup_src_sz) {
+			phdr->p_offset = image->arch.backup_load_addr;
+			break;
+		}
+out:
 	kfree(ced);
 	return ret;
 }
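A second consequence of the patch, easy to miss in the hunks above: the
backup-region offset fixup moved out of the per-range callback and now runs
once over the finished header buffer in prepare_elf_headers(). A standalone
sketch of that pass, using <elf.h> types; the backup_* constants are
hypothetical stand-ins for the kernel's image->arch.backup_* fields.

#include <elf.h>

/* hypothetical stand-ins for image->arch.backup_* */
static const unsigned long long backup_src_start = 0x1000;
static const unsigned long long backup_src_sz    = 0x9f000;
static const unsigned long long backup_load_addr = 0x20000000;

/*
 * Walk the finished program headers once and point the PT_LOAD entry
 * covering the backup region at the relocated copy of that memory.
 * The (ehdr + 1) cast relies on the program headers sitting directly
 * after the ELF header in the buffer, as the patched code lays them out.
 */
static void fixup_backup_offset(Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr = (Elf64_Phdr *)(ehdr + 1);
	int i;

	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == backup_src_start &&
		    phdr->p_memsz == backup_src_sz) {
			phdr->p_offset = backup_load_addr;
			break;
		}
}

Doing the fixup after the fact keeps prepare_elf64_headers() free of any
x86-specific backup-region knowledge, which is what makes it a candidate for
reuse by other architectures, per the commit message.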