path: root/arch/x86/xen
author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-10-02 19:41:31 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-03 04:04:10 -0400
commit		db053b86f4b1ec790da2dafe2acb93be76288bb9 (patch)
tree		95748320c09bbbe9b24434f4f023979bee0f9c11 /arch/x86/xen
parent		08115ab4d98cb577a83971ebd57cdfbcc6f50b68 (diff)
xen: clean up x86-64 warnings
There are a couple of Xen features which rely on directly accessing per-cpu
data via a segment register, which is not yet available on x86-64.  In the
meantime, just disable direct access to the vcpu info structure; this leaves
some of the code as dead, but it will come to life in time, and the warnings
are suppressed.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
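A minimal, standalone C sketch of the gating pattern this patch introduces (not kernel code: SKETCH_X86_32 and the printf messages are hypothetical stand-ins for CONFIG_X86_32 and the real vcpu_info setup path). The placement flag defaults per architecture, and the direct accessors are only meant to be used when it is set:

#include <stdio.h>

/* Hypothetical stand-in for CONFIG_X86_32; the kernel gets this from Kconfig. */
#define SKETCH_X86_32 0

/* Same shape as the patched default: on for 32-bit, off for 64-bit. */
static int have_vcpu_info_placement =
#if SKETCH_X86_32
	1
#else
	0
#endif
	;

int main(void)
{
	if (have_vcpu_info_placement)
		printf("using vcpu_info placement (direct per-cpu access)\n");
	else
		printf("placement disabled: leave the *_direct accessors unused\n");
	return 0;
}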
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/enlighten.c	61
-rw-r--r--	arch/x86/xen/xen-asm_64.S	20
2 files changed, 27 insertions, 54 deletions
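For the xen-asm_64.S half of the diffstat, a hedged C analogue of the stub approach used there: the unfinished fast-path entry points keep assembling, but trap the moment they run (the function name below is hypothetical; the real patch prepends a BUG macro that expands to ud2a):

#include <stdio.h>

/*
 * Analogue of putting BUG (ud2a) at the top of each *_direct entry point:
 * the symbol still exists and links, but executing it faults immediately.
 */
static void xen_irq_enable_direct_sketch(void)
{
	__builtin_trap();	/* emits ud2 on x86 with GCC/Clang, like the asm BUG stub */
	/* the real body would clear the per-cpu event mask here */
}

int main(void)
{
	void (*stub)(void) = xen_irq_enable_direct_sketch;

	(void)stub;		/* calling stub() would raise SIGILL immediately */
	printf("the stubbed fast path traps the moment it is executed\n");
	return 0;
}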
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8ca2f88bde1..85692c9f649 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -112,7 +112,14 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement = 1;
+static int have_vcpu_info_placement =
+#ifdef CONFIG_X86_32
+	1
+#else
+	0
+#endif
+	;
+
 
 static void xen_vcpu_setup(int cpu)
 {
@@ -941,6 +948,7 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
@@ -959,6 +967,7 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
 	xen_set_pte(ptep, pte);
 }
+#endif
 
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
@@ -1025,7 +1034,6 @@ void xen_setup_vcpu_info_placement(void)
 
 	/* xen_vcpu_setup managed to place the vcpu_info within the
 	   percpu area for all cpus, so make use of it */
-#ifdef CONFIG_X86_32
 	if (have_vcpu_info_placement) {
 		printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
@@ -1035,7 +1043,6 @@ void xen_setup_vcpu_info_placement(void)
 		pv_irq_ops.irq_enable = xen_irq_enable_direct;
 		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
 	}
-#endif
 }
 
 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
@@ -1056,12 +1063,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 		goto patch_site
 
 	switch (type) {
-#ifdef CONFIG_X86_32
 		SITE(pv_irq_ops, irq_enable);
 		SITE(pv_irq_ops, irq_disable);
 		SITE(pv_irq_ops, save_fl);
 		SITE(pv_irq_ops, restore_fl);
-#endif /* CONFIG_X86_32 */
 #undef SITE
 
 	patch_site:
@@ -1399,48 +1404,11 @@ static void *m2v(phys_addr_t maddr)
 	return __ka(m2p(maddr));
 }
 
-#ifdef CONFIG_X86_64
-static void walk(pgd_t *pgd, unsigned long addr)
-{
-	unsigned l4idx = pgd_index(addr);
-	unsigned l3idx = pud_index(addr);
-	unsigned l2idx = pmd_index(addr);
-	unsigned l1idx = pte_index(addr);
-	pgd_t l4;
-	pud_t l3;
-	pmd_t l2;
-	pte_t l1;
-
-	xen_raw_printk("walk %p, %lx -> %d %d %d %d\n",
-		       pgd, addr, l4idx, l3idx, l2idx, l1idx);
-
-	l4 = pgd[l4idx];
-	xen_raw_printk("  l4: %016lx\n", l4.pgd);
-	xen_raw_printk("      %016lx\n", pgd_val(l4));
-
-	l3 = ((pud_t *)(m2v(l4.pgd)))[l3idx];
-	xen_raw_printk("  l3: %016lx\n", l3.pud);
-	xen_raw_printk("      %016lx\n", pud_val(l3));
-
-	l2 = ((pmd_t *)(m2v(l3.pud)))[l2idx];
-	xen_raw_printk("  l2: %016lx\n", l2.pmd);
-	xen_raw_printk("      %016lx\n", pmd_val(l2));
-
-	l1 = ((pte_t *)(m2v(l2.pmd)))[l1idx];
-	xen_raw_printk("  l1: %016lx\n", l1.pte);
-	xen_raw_printk("      %016lx\n", pte_val(l1));
-}
-#endif
-
 static void set_page_prot(void *addr, pgprot_t prot)
 {
 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 	pte_t pte = pfn_pte(pfn, prot);
 
-	xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016llx pte=%016llx\n",
-		       addr, pfn, get_phys_to_machine(pfn),
-		       pgprot_val(prot), pte.pte);
-
 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
 		BUG();
 }
@@ -1698,15 +1666,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_raw_console_write("about to get started...\n");
 
-#if 0
-	xen_raw_printk("&boot_params=%p __pa(&boot_params)=%lx __va(__pa(&boot_params))=%lx\n",
-		       &boot_params, __pa_symbol(&boot_params),
-		       __va(__pa_symbol(&boot_params)));
-
-	walk(pgd, &boot_params);
-	walk(pgd, __va(__pa(&boot_params)));
-#endif
-
 	/* Start the world */
 #ifdef CONFIG_X86_32
 	i386_start_kernel();
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 3b9bda46487..05794c566e8 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -26,8 +26,15 @@
 /* Pseudo-flag used for virtual NMI, which we don't implement yet */
 #define XEN_EFLAGS_NMI	0x80000000
 
-#if 0
-#include <asm/percpu.h>
+#if 1
+/*
+	x86-64 does not yet support direct access to percpu variables
+	via a segment override, so we just need to make sure this code
+	never gets used
+ */
+#define BUG			ud2a
+#define PER_CPU_VAR(var, off)	0xdeadbeef
+#endif
 
 /*
 	Enable events.  This clears the event mask and tests the pending
@@ -35,6 +42,8 @@
 	events, then enter the hypervisor to get them handled.
  */
 ENTRY(xen_irq_enable_direct)
+	BUG
+
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 
@@ -58,6 +67,8 @@ ENDPATCH(xen_irq_enable_direct)
 	non-zero.
  */
 ENTRY(xen_irq_disable_direct)
+	BUG
+
 	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 ENDPATCH(xen_irq_disable_direct)
 	ret
@@ -74,6 +85,8 @@ ENDPATCH(xen_irq_disable_direct)
 	Xen and x86 use opposite senses (mask vs enable).
  */
 ENTRY(xen_save_fl_direct)
+	BUG
+
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 	setz %ah
 	addb %ah,%ah
@@ -91,6 +104,8 @@ ENDPATCH(xen_save_fl_direct)
 	if so.
  */
 ENTRY(xen_restore_fl_direct)
+	BUG
+
 	testb $X86_EFLAGS_IF>>8, %ah
 	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 	/* Preempt here doesn't matter because that will deal with
@@ -133,7 +148,6 @@ check_events:
 	pop %rcx
 	pop %rax
 	ret
-#endif
 
 ENTRY(xen_adjust_exception_frame)
 	mov 8+0(%rsp),%rcx