| field | value |
|---|---|
| author | Glenn Elliott <gelliott@cs.unc.edu>, 2012-03-04 19:47:13 -0500 |
| committer | Glenn Elliott <gelliott@cs.unc.edu>, 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/xen/enlighten.c |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
    litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/xen/enlighten.c')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/xen/enlighten.c | 152 |

1 file changed, 87 insertions(+), 65 deletions(-)
```diff
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7d46c8441418..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -30,6 +30,7 @@
 #include <linux/console.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -45,6 +46,7 @@
 #include <asm/paravirt.h>
 #include <asm/apic.h>
 #include <asm/page.h>
+#include <asm/xen/pci.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/fixmap.h>
@@ -58,7 +60,6 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
-#include <asm/setup.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 
@@ -74,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
 EXPORT_SYMBOL_GPL(xen_domain_type);
 
+unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
+EXPORT_SYMBOL(machine_to_phys_mapping);
+unsigned int machine_to_phys_order;
+EXPORT_SYMBOL(machine_to_phys_order);
+
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
```
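The two symbols exported in the hunk above are Xen's machine-to-physical (M2P) table: an array the hypervisor maps at MACH2PHYS_VIRT_START with one pseudo-physical frame number per machine frame, plus an order value bounding its size. As a rough, self-contained sketch of how such a table is consulted (the names and data below are invented for illustration, not code from this commit):

```c
#include <stdio.h>

/* Toy stand-ins for the exported table and its order (2^3 entries here). */
static unsigned long example_m2p[8] = { 7, 6, 5, 4, 3, 2, 1, 0 };
static unsigned int example_m2p_order = 3;

/* Translate a machine frame number to a pseudo-physical frame number. */
static unsigned long example_mfn_to_pfn(unsigned long mfn)
{
        if (mfn >= (1UL << example_m2p_order))
                return ~0UL;            /* outside the table: no translation */
        return example_m2p[mfn];        /* one entry per machine frame */
}

int main(void)
{
        printf("mfn 2 maps to pfn %lu\n", example_mfn_to_pfn(2));
        return 0;
}
```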
```diff
@@ -135,9 +141,6 @@ static void xen_vcpu_setup(int cpu)
         info.mfn = arbitrary_virt_to_mfn(vcpup);
         info.offset = offset_in_page(vcpup);
 
-        printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
-               cpu, vcpup, info.mfn, info.offset);
-
         /* Check to see if the hypervisor will put the vcpu_info
            structure where we want it, which allows direct access via
            a percpu-variable. */
@@ -151,9 +154,6 @@ static void xen_vcpu_setup(int cpu)
                 /* This cpu is using the registered vcpu info, even if
                    later ones fail to. */
                 per_cpu(xen_vcpu, cpu) = vcpup;
-
-                printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
-                       cpu, vcpup);
         }
 }
 
@@ -235,37 +235,31 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
         *dx &= maskedx;
 }
 
-static __init void xen_init_cpuid_mask(void)
+static void __init xen_init_cpuid_mask(void)
 {
         unsigned int ax, bx, cx, dx;
+        unsigned int xsave_mask;
 
         cpuid_leaf1_edx_mask =
                 ~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
                   (1 << X86_FEATURE_MCA)  |  /* disable MCA */
+                  (1 << X86_FEATURE_MTRR) |  /* disable MTRR */
                   (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
         if (!xen_initial_domain())
                 cpuid_leaf1_edx_mask &=
                         ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                           (1 << X86_FEATURE_ACPI));  /* disable ACPI */
-
         ax = 1;
-        cx = 0;
         xen_cpuid(&ax, &bx, &cx, &dx);
 
-        /* cpuid claims we support xsave; try enabling it to see what happens */
-        if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
-                unsigned long cr4;
-
-                set_in_cr4(X86_CR4_OSXSAVE);
-
-                cr4 = read_cr4();
-
-                if ((cr4 & X86_CR4_OSXSAVE) == 0)
-                        cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
+        xsave_mask =
+                (1 << (X86_FEATURE_XSAVE % 32)) |
+                (1 << (X86_FEATURE_OSXSAVE % 32));
 
-                clear_in_cr4(X86_CR4_OSXSAVE);
-        }
+        /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+        if ((cx & xsave_mask) != xsave_mask)
+                cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
```
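The rewritten xen_init_cpuid_mask() above leans on the kernel's X86_FEATURE_* numbering, in which each feature is (CPUID word index * 32) + bit position, so `% 32` recovers the bit inside the 32-bit register being masked; unless the hypervisor reports both XSAVE and OSXSAVE, both bits are hidden from the guest. A small stand-alone sketch of that arithmetic (the EXAMPLE_* values mirror the real leaf-1 ECX bit positions, but the helper is illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

/* X86_FEATURE_* encoding: (cpuid word index * 32) + bit within that word.
 * Word 4 is CPUID leaf 1 ECX; XSAVE is bit 26, OSXSAVE is bit 27. */
#define EXAMPLE_FEATURE_XSAVE   (4 * 32 + 26)
#define EXAMPLE_FEATURE_OSXSAVE (4 * 32 + 27)

int main(void)
{
        uint32_t xsave_mask = (1u << (EXAMPLE_FEATURE_XSAVE % 32)) |
                              (1u << (EXAMPLE_FEATURE_OSXSAVE % 32));
        uint32_t cx = 0;        /* pretend CPUID leaf 1 reported neither bit */

        /* Same shape as the new check: hide both bits unless both are set. */
        if ((cx & xsave_mask) != xsave_mask)
                printf("masking XSAVE/OSXSAVE, mask=0x%08x\n", xsave_mask);
        return 0;
}
```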
```diff
@@ -406,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 /*
  * load_gdt for early boot, when the gdt is only mapped once
  */
-static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
+static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
 {
         unsigned long va = dtr->address;
         unsigned int size = dtr->size + 1;
@@ -573,8 +567,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 
         preempt_disable();
 
-        start = __get_cpu_var(idt_desc).address;
-        end = start + __get_cpu_var(idt_desc).size + 1;
+        start = __this_cpu_read(idt_desc.address);
+        end = start + __this_cpu_read(idt_desc.size) + 1;
 
         xen_mc_flush();
 
@@ -668,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
  * Version of write_gdt_entry for use at early boot-time needed to
  * update an entry as simply as possible.
  */
-static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
+static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
                                             const void *desc, int type)
 {
         switch (type) {
@@ -835,6 +829,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
            Xen console noise. */
                 break;
 
+        case MSR_IA32_CR_PAT:
+                if (smp_processor_id() == 0)
+                        xen_set_pat(((u64)high << 32) | low);
+                break;
+
         default:
                 ret = native_write_msr_safe(msr, low, high);
         }
```
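The new MSR_IA32_CR_PAT case works on the two 32-bit halves that the write_msr hook receives (the low/high split mirrors WRMSR's EDX:EAX convention) and rebuilds the full 64-bit PAT value before handing it to xen_set_pat(); only CPU 0's write is forwarded. A tiny sketch of the recombination, with made-up halves purely for illustration:

```c
#include <assert.h>
#include <stdint.h>

/* Rebuild a 64-bit MSR value from the low/high halves a wrmsr hook sees. */
static uint64_t msr_value_from_halves(uint32_t low, uint32_t high)
{
        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        /* hypothetical halves, not a real PAT setting */
        assert(msr_value_from_halves(0x00010406u, 0x00070406u) ==
               0x0007040600010406ULL);
        return 0;
}
```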
```diff
@@ -873,8 +872,6 @@ void xen_setup_vcpu_info_placement(void)
         /* xen_vcpu_setup managed to place the vcpu_info within the
            percpu area for all cpus, so make use of it */
         if (have_vcpu_info_placement) {
-                printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
                 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -936,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
         return ret;
 }
 
-static const struct pv_info xen_info __initdata = {
+static const struct pv_info xen_info __initconst = {
         .paravirt_enabled = 1,
         .shared_kernel_pmd = 0,
 
         .name = "Xen",
 };
 
-static const struct pv_init_ops xen_init_ops __initdata = {
+static const struct pv_init_ops xen_init_ops __initconst = {
         .patch = xen_patch,
 };
 
-static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+static const struct pv_cpu_ops xen_cpu_ops __initconst = {
         .cpuid = xen_cpuid,
 
         .set_debugreg = xen_set_debugreg,
@@ -1007,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
         .end_context_switch = xen_end_context_switch,
 };
 
-static const struct pv_apic_ops xen_apic_ops __initdata = {
+static const struct pv_apic_ops xen_apic_ops __initconst = {
 #ifdef CONFIG_X86_LOCAL_APIC
         .startup_ipi_hook = paravirt_nop,
 #endif
```
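The __initdata to __initconst switches in the hunks above are a section fix rather than a behavioural change: const objects that are only needed during boot belong in .init.rodata via __initconst, while __initdata is reserved for writable init-time data, and mixing const with __initdata can provoke section type conflicts with some compilers. A minimal kernel-side sketch of the pattern (the struct and names are invented):

```c
#include <linux/init.h>

/* Invented example types/names; only the annotations matter here. */
struct example_boot_info {
        const char *name;
        int flags;
};

/* const + boot-only: __initconst places this in .init.rodata. */
static const struct example_boot_info example_info __initconst = {
        .name  = "example",
        .flags = 0,
};

/* writable boot-only data keeps __initdata (.init.data). */
static int example_boot_counter __initdata;
```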
```diff
@@ -1017,10 +1014,6 @@ static void xen_reboot(int reason)
 {
         struct sched_shutdown r = { .reason = reason };
 
-#ifdef CONFIG_SMP
-        smp_send_stop();
-#endif
-
         if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                 BUG();
 }
@@ -1040,6 +1033,13 @@ static void xen_machine_halt(void)
         xen_reboot(SHUTDOWN_poweroff);
 }
 
+static void xen_machine_power_off(void)
+{
+        if (pm_power_off)
+                pm_power_off();
+        xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
         xen_reboot(SHUTDOWN_crash);
@@ -1062,10 +1062,10 @@ int xen_panic_handler_init(void)
         return 0;
 }
 
-static const struct machine_ops __initdata xen_machine_ops = {
+static const struct machine_ops xen_machine_ops __initconst = {
         .restart = xen_restart,
         .halt = xen_machine_halt,
-        .power_off = xen_machine_halt,
+        .power_off = xen_machine_power_off,
         .shutdown = xen_machine_halt,
         .crash_shutdown = xen_crash_shutdown,
         .emergency_restart = xen_emergency_restart,
@@ -1091,6 +1091,8 @@ static void __init xen_setup_stackprotector(void)
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
+        struct physdev_set_iopl set_iopl;
+        int rc;
         pgd_t *pgd;
 
         if (!xen_start_info)
@@ -1098,6 +1100,8 @@ asmlinkage void __init xen_start_kernel(void)
 
         xen_domain_type = XEN_PV_DOMAIN;
 
+        xen_setup_machphys_mapping();
+
         /* Install Xen paravirt ops */
         pv_info = xen_info;
         pv_init_ops = xen_init_ops;
@@ -1170,6 +1174,15 @@ asmlinkage void __init xen_start_kernel(void)
 
         xen_smp_init();
 
+#ifdef CONFIG_ACPI_NUMA
+        /*
+         * The pages we from Xen are not related to machine pages, so
+         * any NUMA information the kernel tries to get from ACPI will
+         * be meaningless.  Prevent it from trying.
+         */
+        acpi_numa = -1;
+#endif
+
         pgd = (pgd_t *)xen_start_info->pt_base;
 
         if (!xen_initial_domain())
@@ -1181,12 +1194,16 @@ asmlinkage void __init xen_start_kernel(void)
         per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
         local_irq_disable();
-        early_boot_irqs_off();
+        early_boot_irqs_disabled = true;
+
+        memblock_init();
 
         xen_raw_console_write("mapping kernel into physical memory\n");
         pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+        xen_ident_map_ISA();
 
-        init_mm.pgd = pgd;
+        /* Allocate and initialize top and mid mfn levels for p2m structure */
+        xen_build_mfn_list_list();
 
         /* keep using Xen gdt for now; no urgent need to change it */
 
@@ -1197,10 +1214,18 @@ asmlinkage void __init xen_start_kernel(void)
 #else
         pv_info.kernel_rpl = 0;
 #endif
-
         /* set the limit of our address space */
         xen_reserve_top();
 
+        /* We used to do this in xen_arch_setup, but that is too late on AMD
+         * were early_cpu_init (run before ->arch_setup()) calls early_amd_init
+         * which pokes 0xcf8 port.
+         */
+        set_iopl.iopl = 1;
+        rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+        if (rc != 0)
+                xen_raw_printk("physdev_op failed %d\n", rc);
+
 #ifdef CONFIG_X86_32
         /* set up basic CPUID stuff */
         cpu_detect(&new_cpu_data);
@@ -1220,6 +1245,8 @@ asmlinkage void __init xen_start_kernel(void)
                 add_preferred_console("xenboot", 0, NULL);
                 add_preferred_console("tty", 0, NULL);
                 add_preferred_console("hvc", 0, NULL);
+                if (pci_xen)
+                        x86_init.pci.arch_init = pci_xen_init;
         } else {
                 /* Make sure ACS will be enabled */
                 pci_request_acs();
@@ -1238,25 +1265,6 @@
 #endif
 }
 
-static uint32_t xen_cpuid_base(void)
-{
-        uint32_t base, eax, ebx, ecx, edx;
-        char signature[13];
-
-        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
-                cpuid(base, &eax, &ebx, &ecx, &edx);
-                *(uint32_t *)(signature + 0) = ebx;
-                *(uint32_t *)(signature + 4) = ecx;
-                *(uint32_t *)(signature + 8) = edx;
-                signature[12] = 0;
-
-                if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
-                        return base;
-        }
-
-        return 0;
-}
-
 static int init_hvm_pv_info(int *major, int *minor)
 {
         uint32_t eax, ebx, ecx, edx, pages, msr, base;
@@ -1276,15 +1284,14 @@ static int init_hvm_pv_info(int *major, int *minor)
 
         xen_setup_features();
 
-        pv_info = xen_info;
-        pv_info.kernel_rpl = 0;
+        pv_info.name = "Xen HVM";
 
         xen_domain_type = XEN_HVM_DOMAIN;
 
         return 0;
 }
 
-void xen_hvm_init_shared_info(void)
+void __ref xen_hvm_init_shared_info(void)
 {
         int cpu;
         struct xen_add_to_physmap xatp;
@@ -1323,6 +1330,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
         switch (action) {
         case CPU_UP_PREPARE:
                 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+                if (xen_have_vector_callback)
+                        xen_init_lock_cpu(cpu);
                 break;
         default:
                 break;
@@ -1330,7 +1339,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
         return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = {
+static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
         .notifier_call = xen_hvm_cpu_notify,
 };
 
@@ -1347,6 +1356,7 @@ static void __init xen_hvm_guest_init(void)
 
         if (xen_feature(XENFEAT_hvm_callback_vector))
                 xen_have_vector_callback = 1;
+        xen_hvm_smp_init();
         register_cpu_notifier(&xen_hvm_cpu_notifier);
         xen_unplug_emulated_devices();
         have_vcpu_info_placement = 0;
@@ -1366,7 +1376,19 @@ static bool __init xen_hvm_platform(void)
         return true;
 }
 
-const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = {
+bool xen_hvm_need_lapic(void)
+{
+        if (xen_pv_domain())
+                return false;
+        if (!xen_hvm_domain())
+                return false;
+        if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+                return false;
+        return true;
+}
+EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
         .name = "Xen HVM",
         .detect = xen_hvm_platform,
         .init_platform = xen_hvm_guest_init,
```