about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorLaura Abbott <labbott@redhat.com>2018-04-18 13:08:32 -0400
committerBoris Ostrovsky <boris.ostrovsky@oracle.com>2018-04-19 16:35:51 -0400
commiteb0b4aa89cf21b69e15168010189e9d9c7483e54 (patch)
tree082db0025e38fc3ef5c52e09439356b7171c95ed
parentebf04f331fa15a966262341a7dc6b1a0efd633e4 (diff)
x86/xen: Remove use of VLAs
There's an ongoing effort to remove VLAs[1] from the kernel to eventually turn on -Wvla. It turns out, the few VLAs in use in Xen produce only a single entry array that is always bounded by GDT_SIZE. Clean up the code to get rid of the VLA and the loop.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Laura Abbott <labbott@redhat.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
[boris: Use BUG_ON(size>PAGE_SIZE) instead of GDT_SIZE]
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-rw-r--r--arch/x86/xen/enlighten_pv.c86
1 file changed, 31 insertions(+), 55 deletions(-)
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index c36d23aa6c35..357969a3697c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -421,45 +421,33 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
421{ 421{
422 unsigned long va = dtr->address; 422 unsigned long va = dtr->address;
423 unsigned int size = dtr->size + 1; 423 unsigned int size = dtr->size + 1;
424 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); 424 unsigned long pfn, mfn;
425 unsigned long frames[pages]; 425 int level;
426 int f; 426 pte_t *ptep;
427 427 void *virt;
428 /*
429 * A GDT can be up to 64k in size, which corresponds to 8192
430 * 8-byte entries, or 16 4k pages..
431 */
432 428
433 BUG_ON(size > 65536); 429 /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
430 BUG_ON(size > PAGE_SIZE);
434 BUG_ON(va & ~PAGE_MASK); 431 BUG_ON(va & ~PAGE_MASK);
435 432
436 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 433 /*
437 int level; 434 * The GDT is per-cpu and is in the percpu data area.
438 pte_t *ptep; 435 * That can be virtually mapped, so we need to do a
439 unsigned long pfn, mfn; 436 * page-walk to get the underlying MFN for the
440 void *virt; 437 * hypercall. The page can also be in the kernel's
441 438 * linear range, so we need to RO that mapping too.
442 /* 439 */
443 * The GDT is per-cpu and is in the percpu data area. 440 ptep = lookup_address(va, &level);
444 * That can be virtually mapped, so we need to do a 441 BUG_ON(ptep == NULL);
445 * page-walk to get the underlying MFN for the
446 * hypercall. The page can also be in the kernel's
447 * linear range, so we need to RO that mapping too.
448 */
449 ptep = lookup_address(va, &level);
450 BUG_ON(ptep == NULL);
451
452 pfn = pte_pfn(*ptep);
453 mfn = pfn_to_mfn(pfn);
454 virt = __va(PFN_PHYS(pfn));
455 442
456 frames[f] = mfn; 443 pfn = pte_pfn(*ptep);
444 mfn = pfn_to_mfn(pfn);
445 virt = __va(PFN_PHYS(pfn));
457 446
458 make_lowmem_page_readonly((void *)va); 447 make_lowmem_page_readonly((void *)va);
459 make_lowmem_page_readonly(virt); 448 make_lowmem_page_readonly(virt);
460 }
461 449
462 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) 450 if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
463 BUG(); 451 BUG();
464} 452}
465 453
@@ -470,34 +458,22 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
470{ 458{
471 unsigned long va = dtr->address; 459 unsigned long va = dtr->address;
472 unsigned int size = dtr->size + 1; 460 unsigned int size = dtr->size + 1;
473 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE); 461 unsigned long pfn, mfn;
474 unsigned long frames[pages]; 462 pte_t pte;
475 int f;
476
477 /*
478 * A GDT can be up to 64k in size, which corresponds to 8192
479 * 8-byte entries, or 16 4k pages..
480 */
481 463
482 BUG_ON(size > 65536); 464 /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
465 BUG_ON(size > PAGE_SIZE);
483 BUG_ON(va & ~PAGE_MASK); 466 BUG_ON(va & ~PAGE_MASK);
484 467
485 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 468 pfn = virt_to_pfn(va);
486 pte_t pte; 469 mfn = pfn_to_mfn(pfn);
487 unsigned long pfn, mfn;
488 470
489 pfn = virt_to_pfn(va); 471 pte = pfn_pte(pfn, PAGE_KERNEL_RO);
490 mfn = pfn_to_mfn(pfn);
491 472
492 pte = pfn_pte(pfn, PAGE_KERNEL_RO); 473 if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
493 474 BUG();
494 if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
495 BUG();
496
497 frames[f] = mfn;
498 }
499 475
500 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) 476 if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
501 BUG(); 477 BUG();
502} 478}
503 479