author	Boris Ostrovsky <boris.ostrovsky@oracle.com>	2017-02-03 16:57:22 -0500
committer	Boris Ostrovsky <boris.ostrovsky@oracle.com>	2017-02-07 08:07:01 -0500
commit	063334f30543597430f172bd7690d21e3590e148 (patch)
tree	66ecb71e2297708467e28ab41ee7f6765add8e60
parent	5a7670ee23f2c07a639c263b70140eaf1da9f68f (diff)
xen/x86: Remove PVH support
We are replacing the existing PVH guests with a new implementation.

We are keeping the xen_pvh_domain() macro (for now set to zero) because
when we introduce the new PVH implementation later in this series we
will reuse the current PVH-specific code (xen_pvh_gnttab_setup()), and
that code is conditioned by 'if (xen_pvh_domain())'. (We will also need
a noop xen_pvh_domain() for !CONFIG_XEN_PVH.)

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
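A minimal sketch of the guard pattern the message refers to; only the
stub macro actually appears in this patch, and the xen_pvh_gnttab_setup()
body shown here is a hypothetical placeholder, not the real function:

	#include <linux/errno.h>
	#include <linux/init.h>

	/* Stub kept by this patch: always false until the new PVH
	 * implementation is introduced later in the series. */
	#define xen_pvh_domain()	(0)

	/* Hypothetical call-site shape: PVH-only code stays compiled
	 * in but is dead code while xen_pvh_domain() evaluates to 0. */
	static int __init xen_pvh_gnttab_setup(void)
	{
		if (!xen_pvh_domain())
			return -ENODEV;

		/* ... PVH-specific grant-table setup would run here ... */
		return 0;
	}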
-rw-r--r--	arch/x86/xen/enlighten.c	140
-rw-r--r--	arch/x86/xen/mmu.c	21
-rw-r--r--	arch/x86/xen/setup.c	37
-rw-r--r--	arch/x86/xen/smp.c	78
-rw-r--r--	arch/x86/xen/smp.h	8
-rw-r--r--	arch/x86/xen/xen-head.S	62
-rw-r--r--	arch/x86/xen/xen-ops.h	1
-rw-r--r--	drivers/xen/events/events_base.c	1
-rw-r--r--	include/xen/xen.h	13
9 files changed, 54 insertions, 307 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 51ef95232725..828f1b226f56 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1138,10 +1138,11 @@ void xen_setup_vcpu_info_placement(void)
 		xen_vcpu_setup(cpu);
 	}
 
-	/* xen_vcpu_setup managed to place the vcpu_info within the
-	 * percpu area for all cpus, so make use of it. Note that for
-	 * PVH we want to use native IRQ mechanism. */
-	if (have_vcpu_info_placement && !xen_pvh_domain()) {
+	/*
+	 * xen_vcpu_setup managed to place the vcpu_info within the
+	 * percpu area for all cpus, so make use of it.
+	 */
+	if (have_vcpu_info_placement) {
 		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
 		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
 		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1413,49 +1414,9 @@ static void __init xen_boot_params_init_edd(void)
  * Set up the GDT and segment registers for -fstack-protector. Until
  * we do this, we have to be careful not to call any stack-protected
  * function, which is most of the kernel.
- *
- * Note, that it is __ref because the only caller of this after init
- * is PVH which is not going to use xen_load_gdt_boot or other
- * __init functions.
  */
-static void __ref xen_setup_gdt(int cpu)
+static void xen_setup_gdt(int cpu)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-#ifdef CONFIG_X86_64
-		unsigned long dummy;
-
-		load_percpu_segment(cpu); /* We need to access per-cpu area */
-		switch_to_new_gdt(cpu); /* GDT and GS set */
-
-		/* We are switching of the Xen provided GDT to our HVM mode
-		 * GDT. The new GDT has __KERNEL_CS with CS.L = 1
-		 * and we are jumping to reload it.
-		 */
-		asm volatile ("pushq %0\n"
-			      "leaq 1f(%%rip),%0\n"
-			      "pushq %0\n"
-			      "lretq\n"
-			      "1:\n"
-			      : "=&r" (dummy) : "0" (__KERNEL_CS));
-
-		/*
-		 * While not needed, we also set the %es, %ds, and %fs
-		 * to zero. We don't care about %ss as it is NULL.
-		 * Strictly speaking this is not needed as Xen zeros those
-		 * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE)
-		 *
-		 * Linux zeros them in cpu_init() and in secondary_startup_64
-		 * (for BSP).
-		 */
-		loadsegment(es, 0);
-		loadsegment(ds, 0);
-		loadsegment(fs, 0);
-#else
-		/* PVH: TODO Implement. */
-		BUG();
-#endif
-		return; /* PVH does not need any PV GDT ops. */
-	}
 	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
 	pv_cpu_ops.load_gdt = xen_load_gdt_boot;
 
@@ -1466,59 +1427,6 @@ static void __ref xen_setup_gdt(int cpu)
 	pv_cpu_ops.load_gdt = xen_load_gdt;
 }
 
-#ifdef CONFIG_XEN_PVH
-/*
- * A PV guest starts with default flags that are not set for PVH, set them
- * here asap.
- */
-static void xen_pvh_set_cr_flags(int cpu)
-{
-
-	/* Some of these are setup in 'secondary_startup_64'. The others:
-	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
-	 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
-	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
-
-	if (!cpu)
-		return;
-	/*
-	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
-	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
-	 */
-	if (boot_cpu_has(X86_FEATURE_PSE))
-		cr4_set_bits_and_update_boot(X86_CR4_PSE);
-
-	if (boot_cpu_has(X86_FEATURE_PGE))
-		cr4_set_bits_and_update_boot(X86_CR4_PGE);
-}
-
-/*
- * Note, that it is ref - because the only caller of this after init
- * is PVH which is not going to use xen_load_gdt_boot or other
- * __init functions.
- */
-void __ref xen_pvh_secondary_vcpu_init(int cpu)
-{
-	xen_setup_gdt(cpu);
-	xen_pvh_set_cr_flags(cpu);
-}
-
-static void __init xen_pvh_early_guest_init(void)
-{
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
-	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
-
-	xen_pvh_early_cpu_init(0, false);
-	xen_pvh_set_cr_flags(0);
-
-#ifdef CONFIG_X86_32
-	BUG(); /* PVH: Implement proper support. */
-#endif
-}
-#endif /* CONFIG_XEN_PVH */
-
 static void __init xen_dom0_set_legacy_features(void)
 {
 	x86_platform.legacy.rtc = 1;
@@ -1555,24 +1463,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	xen_domain_type = XEN_PV_DOMAIN;
 
 	xen_setup_features();
-#ifdef CONFIG_XEN_PVH
-	xen_pvh_early_guest_init();
-#endif
+
 	xen_setup_machphys_mapping();
 
 	/* Install Xen paravirt ops */
 	pv_info = xen_info;
 	pv_init_ops = xen_init_ops;
-	if (!xen_pvh_domain()) {
-		pv_cpu_ops = xen_cpu_ops;
+	pv_cpu_ops = xen_cpu_ops;
 
-		x86_platform.get_nmi_reason = xen_get_nmi_reason;
-	}
+	x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
-	else
-		x86_init.resources.memory_setup = xen_memory_setup;
+	x86_init.resources.memory_setup = xen_memory_setup;
 	x86_init.oem.arch_setup = xen_arch_setup;
 	x86_init.oem.banner = xen_banner;
@@ -1665,18 +1566,15 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	/* set the limit of our address space */
 	xen_reserve_top();
 
-	/* PVH: runs at default kernel iopl of 0 */
-	if (!xen_pvh_domain()) {
-		/*
-		 * We used to do this in xen_arch_setup, but that is too late
-		 * on AMD were early_cpu_init (run before ->arch_setup()) calls
-		 * early_amd_init which pokes 0xcf8 port.
-		 */
-		set_iopl.iopl = 1;
-		rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-		if (rc != 0)
-			xen_raw_printk("physdev_op failed %d\n", rc);
-	}
+	/*
+	 * We used to do this in xen_arch_setup, but that is too late
+	 * on AMD were early_cpu_init (run before ->arch_setup()) calls
+	 * early_amd_init which pokes 0xcf8 port.
+	 */
+	set_iopl.iopl = 1;
+	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+	if (rc != 0)
+		xen_raw_printk("physdev_op failed %d\n", rc);
 
 #ifdef CONFIG_X86_32
 	/* set up basic CPUID stuff */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7d5afdb417cc..f6740b5b1738 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1792,10 +1792,6 @@ static void __init set_page_prot_flags(void *addr, pgprot_t prot,
 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 	pte_t pte = pfn_pte(pfn, prot);
 
-	/* For PVH no need to set R/O or R/W to pin them or unpin them. */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
 		BUG();
 }
@@ -1902,8 +1898,7 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
  * level2_ident_pgt, and level2_kernel_pgt. This means that only the
  * kernel has a physical mapping to start with - but that's enough to
  * get __va working. We need to fill in the rest of the physical
- * mapping once some sort of allocator has been set up. NOTE: for
- * PVH, the page tables are native.
+ * mapping once some sort of allocator has been set up.
  */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -2812,16 +2807,6 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-#ifdef CONFIG_XEN_PVH
-		/* We need to update the local page tables and the xen HAP */
-		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
-						 prot, domid, pages);
-#else
-		return -EINVAL;
-#endif
-	}
-
 	rmd.mfn = gfn;
 	rmd.prot = prot;
 	/* We use the err_ptr to indicate if there we are doing a contiguous
@@ -2915,10 +2900,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
-#ifdef CONFIG_XEN_PVH
-	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
-#else
 	return -EINVAL;
-#endif
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f3f7b41116f7..a8c306cf8868 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -915,39 +915,6 @@ char * __init xen_memory_setup(void)
 }
 
 /*
- * Machine specific memory setup for auto-translated guests.
- */
-char * __init xen_auto_xlated_memory_setup(void)
-{
-	struct xen_memory_map memmap;
-	int i;
-	int rc;
-
-	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
-	set_xen_guest_handle(memmap.buffer, xen_e820_map);
-
-	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-	if (rc < 0)
-		panic("No memory map (%d)\n", rc);
-
-	xen_e820_map_entries = memmap.nr_entries;
-
-	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
-			  &xen_e820_map_entries);
-
-	for (i = 0; i < xen_e820_map_entries; i++)
-		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
-				xen_e820_map[i].type);
-
-	/* Remove p2m info, it is not needed. */
-	xen_start_info->mfn_list = 0;
-	xen_start_info->first_p2m_pfn = 0;
-	xen_start_info->nr_p2m_frames = 0;
-
-	return "Xen";
-}
-
-/*
  * Set the bit indicating "nosegneg" library variants should be used.
  * We only need to bother in pure 32-bit mode; compat 32-bit processes
  * can have un-truncated segments, so wrapping around is allowed.
@@ -1032,8 +999,8 @@ void __init xen_pvmmu_arch_setup(void)
 void __init xen_arch_setup(void)
 {
 	xen_panic_handler_init();
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		xen_pvmmu_arch_setup();
+
+	xen_pvmmu_arch_setup();
 
 #ifdef CONFIG_ACPI
 	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 311acad7dad2..0dee6f59ea82 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -99,18 +99,8 @@ static void cpu_bringup(void)
 	local_irq_enable();
 }
 
-/*
- * Note: cpu parameter is only relevant for PVH. The reason for passing it
- * is we can't do smp_processor_id until the percpu segments are loaded, for
- * which we need the cpu number! So we pass it in rdi as first parameter.
- */
-asmlinkage __visible void cpu_bringup_and_idle(int cpu)
+asmlinkage __visible void cpu_bringup_and_idle(void)
 {
-#ifdef CONFIG_XEN_PVH
-	if (xen_feature(XENFEAT_auto_translated_physmap) &&
-	    xen_feature(XENFEAT_supervisor_mode_kernel))
-		xen_pvh_secondary_vcpu_init(cpu);
-#endif
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
@@ -404,61 +394,47 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	gdt = get_cpu_gdt_table(cpu);
 
 #ifdef CONFIG_X86_32
-	/* Note: PVH is not yet supported on x86_32. */
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
 	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
 #endif
 	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-		ctxt->flags = VGCF_IN_KERNEL;
-		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
-		ctxt->user_regs.ds = __USER_DS;
-		ctxt->user_regs.es = __USER_DS;
-		ctxt->user_regs.ss = __KERNEL_DS;
+	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->flags = VGCF_IN_KERNEL;
+	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+	ctxt->user_regs.ds = __USER_DS;
+	ctxt->user_regs.es = __USER_DS;
+	ctxt->user_regs.ss = __KERNEL_DS;
 
-		xen_copy_trap_info(ctxt->trap_ctxt);
+	xen_copy_trap_info(ctxt->trap_ctxt);
 
-		ctxt->ldt_ents = 0;
+	ctxt->ldt_ents = 0;
 
-		BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 
-		gdt_mfn = arbitrary_virt_to_mfn(gdt);
-		make_lowmem_page_readonly(gdt);
-		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+	gdt_mfn = arbitrary_virt_to_mfn(gdt);
+	make_lowmem_page_readonly(gdt);
+	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-		ctxt->gdt_frames[0] = gdt_mfn;
-		ctxt->gdt_ents      = GDT_ENTRIES;
+	ctxt->gdt_frames[0] = gdt_mfn;
+	ctxt->gdt_ents      = GDT_ENTRIES;
 
-		ctxt->kernel_ss = __KERNEL_DS;
-		ctxt->kernel_sp = idle->thread.sp0;
+	ctxt->kernel_ss = __KERNEL_DS;
+	ctxt->kernel_sp = idle->thread.sp0;
 
 #ifdef CONFIG_X86_32
-		ctxt->event_callback_cs     = __KERNEL_CS;
-		ctxt->failsafe_callback_cs  = __KERNEL_CS;
+	ctxt->event_callback_cs     = __KERNEL_CS;
+	ctxt->failsafe_callback_cs  = __KERNEL_CS;
 #else
-		ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
-		ctxt->event_callback_eip    =
-			(unsigned long)xen_hypervisor_callback;
-		ctxt->failsafe_callback_eip =
-			(unsigned long)xen_failsafe_callback;
-		ctxt->user_regs.cs = __KERNEL_CS;
-		per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
-	}
-#ifdef CONFIG_XEN_PVH
-	else {
-		/*
-		 * The vcpu comes on kernel page tables which have the NX pte
-		 * bit set. This means before DS/SS is touched, NX in
-		 * EFER must be set. Hence the following assembly glue code.
-		 */
-		ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
-		ctxt->user_regs.rdi = cpu;
-		ctxt->user_regs.rsi = true;  /* entry == true */
-	}
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
+	ctxt->event_callback_eip    =
+		(unsigned long)xen_hypervisor_callback;
+	ctxt->failsafe_callback_eip =
+		(unsigned long)xen_failsafe_callback;
+	ctxt->user_regs.cs = __KERNEL_CS;
+	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+
 	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
 	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index c5c16dc4f694..9beef333584a 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -21,12 +21,4 @@ static inline int xen_smp_intr_init(unsigned int cpu)
 static inline void xen_smp_intr_free(unsigned int cpu) {}
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_XEN_PVH
-extern void xen_pvh_early_cpu_init(int cpu, bool entry);
-#else
-static inline void xen_pvh_early_cpu_init(int cpu, bool entry)
-{
-}
-#endif
-
 #endif
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 7f8d8abf4c1a..37794e42b67d 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -16,25 +16,6 @@
 #include <xen/interface/xen-mca.h>
 #include <asm/xen/interface.h>
 
-#ifdef CONFIG_XEN_PVH
-#define PVH_FEATURES_STR  "|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel"
-/* Note the lack of 'hvm_callback_vector'. Older hypervisor will
- * balk at this being part of XEN_ELFNOTE_FEATURES, so we put it in
- * XEN_ELFNOTE_SUPPORTED_FEATURES which older hypervisors will ignore.
- */
-#define PVH_FEATURES ((1 << XENFEAT_writable_page_tables) | \
-		      (1 << XENFEAT_auto_translated_physmap) | \
-		      (1 << XENFEAT_supervisor_mode_kernel) | \
-		      (1 << XENFEAT_hvm_callback_vector))
-/* The XENFEAT_writable_page_tables is not stricly necessary as we set that
- * up regardless whether this CONFIG option is enabled or not, but it
- * clarifies what the right flags need to be.
- */
-#else
-#define PVH_FEATURES_STR  ""
-#define PVH_FEATURES (0)
-#endif
-
 	__INIT
 ENTRY(startup_xen)
 	cld
@@ -54,41 +35,6 @@ ENTRY(startup_xen)
 
 	__FINIT
 
-#ifdef CONFIG_XEN_PVH
-/*
- * xen_pvh_early_cpu_init() - early PVH VCPU initialization
- * @cpu:   this cpu number (%rdi)
- * @entry: true if this is a secondary vcpu coming up on this entry
- *         point, false if this is the boot CPU being initialized for
- *         the first time (%rsi)
- *
- * Note: This is called as a function on the boot CPU, and is the entry point
- * on the secondary CPU.
- */
-ENTRY(xen_pvh_early_cpu_init)
-	mov     %rsi, %r11
-
-	/* Gather features to see if NX implemented. */
-	mov     $0x80000001, %eax
-	cpuid
-	mov     %edx, %esi
-
-	mov     $MSR_EFER, %ecx
-	rdmsr
-	bts     $_EFER_SCE, %eax
-
-	bt      $20, %esi
-	jnc     1f      /* No NX, skip setting it */
-	bts     $_EFER_NX, %eax
-1:	wrmsr
-#ifdef CONFIG_SMP
-	cmp     $0, %r11b
-	jne     cpu_bringup_and_idle
-#endif
-	ret
-
-#endif /* CONFIG_XEN_PVH */
-
 .pushsection .text
 	.balign PAGE_SIZE
 ENTRY(hypercall_page)
@@ -114,10 +60,10 @@ ENTRY(hypercall_page)
 #endif
 	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          _ASM_PTR startup_xen)
 	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
-	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .ascii "!writable_page_tables|pae_pgdir_above_4gb"; .asciz PVH_FEATURES_STR)
-	ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, .long (PVH_FEATURES) |
-		(1 << XENFEAT_writable_page_tables) |
-		(1 << XENFEAT_dom0))
+	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
+		.ascii "!writable_page_tables|pae_pgdir_above_4gb")
+	ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
+		.long (1 << XENFEAT_writable_page_tables) | (1 << XENFEAT_dom0))
 	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz "yes")
 	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")
 	ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index ac0a2b0f9e62..f6a41c41ebc7 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -146,5 +146,4 @@ __visible void xen_adjust_exception_frame(void);
 
 extern int xen_panic_handler_init(void);
 
-void xen_pvh_secondary_vcpu_init(int cpu);
 #endif /* XEN_OPS_H */
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index fd8e872d2943..6a53577772c9 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1704,7 +1704,6 @@ void __init xen_init_IRQ(void)
 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
-		/* TODO: No PVH support for PIRQ EOI */
 		if (rc != 0) {
 			free_page((unsigned long) pirq_eoi_map);
 			pirq_eoi_map = NULL;
diff --git a/include/xen/xen.h b/include/xen/xen.h
index f0f0252cff9a..d0f96840f71f 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,17 +29,6 @@ extern enum xen_domain_type xen_domain_type;
 #define xen_initial_domain()	(0)
 #endif	/* CONFIG_XEN_DOM0 */
 
-#ifdef CONFIG_XEN_PVH
-/* This functionality exists only for x86. The XEN_PVHVM support exists
- * only in x86 world - hence on ARM it will be always disabled.
- * N.B. ARM guests are neither PV nor HVM nor PVHVM.
- * It's a bit like PVH but is different also (it's further towards the H
- * end of the spectrum than even PVH).
- */
-#include <xen/features.h>
-#define xen_pvh_domain() (xen_pv_domain() && \
-			  xen_feature(XENFEAT_auto_translated_physmap))
-#else
 #define xen_pvh_domain()	(0)
-#endif
+
 #endif /* _XEN_XEN_H */