author		Len Brown <len.brown@intel.com>		2008-10-22 23:57:26 -0400
committer	Len Brown <len.brown@intel.com>		2008-10-23 00:11:07 -0400
commit		057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree		4333e608da237c73ff69b10878025cca96dcb4c8 /arch/ia64/kvm
parent		3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent		2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--	arch/ia64/kvm/Kconfig		|   2
-rw-r--r--	arch/ia64/kvm/Makefile		|   6
-rw-r--r--	arch/ia64/kvm/irq.h		|  31
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	|  66
-rw-r--r--	arch/ia64/kvm/kvm_minstate.h	|  23
-rw-r--r--	arch/ia64/kvm/optvfault.S	| 181
-rw-r--r--	arch/ia64/kvm/process.c		|   4
-rw-r--r--	arch/ia64/kvm/vcpu.h		|  26
-rw-r--r--	arch/ia64/kvm/vmm_ivt.S		|  39
-rw-r--r--	arch/ia64/kvm/vtlb.c		|  23
10 files changed, 301 insertions, 100 deletions
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 7914e4828504..8e99fed6b3fd 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -46,4 +46,6 @@ config KVM_INTEL
 config KVM_TRACE
 	bool
 
+source drivers/virtio/Kconfig
+
 endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index bf22fb9e6dcf..cf37f8f490c0 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -44,7 +44,11 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o)
+		coalesced_mmio.o irq_comm.o)
+
+ifeq ($(CONFIG_DMAR),y)
+common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
new file mode 100644
index 000000000000..c6786e8b1bf4
--- /dev/null
+++ b/arch/ia64/kvm/irq.h
@@ -0,0 +1,31 @@
+/*
+ * irq.h: In-kernel interrupt controller related definitions
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 1;
+}
+
+#endif
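A note on the new header: on ia64 the I/O APIC model always lives in the kernel, so irqchip_in_kernel() can unconditionally return 1; the generic assigned-device code pulled in by this merge gates itself on that predicate. A minimal sketch of such a caller (the function name is hypothetical, not from this commit):

    /* Hypothetical caller: device assignment needs an in-kernel irqchip. */
    static int assign_device_example(struct kvm *kvm)
    {
            if (!irqchip_in_kernel(kvm))    /* always true on ia64 */
                    return -EINVAL;
            /* ... set up IOMMU mappings and interrupt routing ... */
            return 0;
    }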
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index cd0d1a7284b7..c0699f0e35a9 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -45,6 +46,7 @@
 #include "iodev.h"
 #include "ioapic.h"
 #include "lapic.h"
+#include "irq.h"
 
 static unsigned long kvm_vmm_base;
 static unsigned long kvm_vsa_base;
@@ -179,12 +181,16 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_MP_STATE:
 
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_IOMMU:
+		r = intel_iommu_found();
+		break;
 	default:
 		r = 0;
 	}
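The new KVM_CAP_IOMMU capability is discoverable from userspace through the usual KVM_CHECK_EXTENSION ioctl on /dev/kvm; on ia64 it reports intel_iommu_found(). A minimal probe, as a sketch (error handling elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            /* Nonzero when VT-d hardware was found, i.e. device
             * assignment can work on this host. */
            int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU);
            printf("KVM_CAP_IOMMU: %d\n", r);
            return 0;
    }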
@@ -771,6 +777,7 @@ static void kvm_init_vm(struct kvm *kvm)
 	 */
 	kvm_build_io_pmt(kvm);
 
+	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -1334,6 +1341,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	kvm_iommu_unmap_guest(kvm);
+#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
+	kvm_free_all_assigned_devices(kvm);
+#endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
@@ -1435,17 +1446,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		int user_alloc)
 {
 	unsigned long i;
-	struct page *page;
+	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
 	for (i = 0; i < npages; i++) {
-		page = gfn_to_page(kvm, base_gfn + i);
-		kvm_set_pmt_entry(kvm, base_gfn + i,
-				page_to_pfn(page) << PAGE_SHIFT,
-				_PAGE_AR_RWX|_PAGE_MA_WB);
-		memslot->rmap[i] = (unsigned long)page;
+		pfn = gfn_to_pfn(kvm, base_gfn + i);
+		if (!kvm_is_mmio_pfn(pfn)) {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					pfn << PAGE_SHIFT,
+					_PAGE_AR_RWX | _PAGE_MA_WB);
+			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+		} else {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+					_PAGE_MA_UC);
+			memslot->rmap[i] = 0;
+		}
 	}
 
 	return 0;
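The rewritten loop classifies each frame before filling the PMT: normal RAM keeps the cacheable WB mapping and an rmap entry, while device MMIO frames get an uncached mapping tagged GPFN_PHYS_MMIO and no rmap. Roughly, the generic helper it relies on behaves like the sketch below (an approximation of this era's kvm_is_mmio_pfn(), not the verbatim source):

    /* Approximate model: a pfn counts as "MMIO" when it has no usable
     * struct page, or when its page is marked reserved. */
    static int kvm_is_mmio_pfn_model(unsigned long pfn)
    {
            if (pfn_valid(pfn))
                    return PageReserved(pfn_to_page(pfn));
            return 1;
    }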
@@ -1789,11 +1807,43 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 		struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	vcpu_load(vcpu);
+	mp_state->mp_state = vcpu->arch.mp_state;
+	vcpu_put(vcpu);
+	return 0;
+}
+
+static int vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	int r;
+	long psr;
+	local_irq_save(psr);
+	r = kvm_insert_vmm_mapping(vcpu);
+	if (r)
+		goto fail;
+
+	vcpu->arch.launched = 0;
+	kvm_arch_vcpu_uninit(vcpu);
+	r = kvm_arch_vcpu_init(vcpu);
+	if (r)
+		goto fail;
+
+	kvm_purge_vmm_mapping(vcpu);
+	r = 0;
+fail:
+	local_irq_restore(psr);
+	return r;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	int r = 0;
+
+	vcpu_load(vcpu);
+	vcpu->arch.mp_state = mp_state->mp_state;
+	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
+		r = vcpu_reset(vcpu);
+	vcpu_put(vcpu);
+	return r;
 }
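Together with the KVM_CAP_MP_STATE case above, these two handlers make the KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls functional on ia64; writing KVM_MP_STATE_UNINITIALIZED funnels into the new vcpu_reset(). A userspace sketch against a vcpu fd (assumes the vcpu already exists):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vcpu_fd comes from a prior KVM_CREATE_VCPU call. */
    int reset_vcpu(int vcpu_fd)
    {
            struct kvm_mp_state mp;

            if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                    return -1;
            mp.mp_state = KVM_MP_STATE_UNINITIALIZED;  /* request a reset */
            return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
    }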
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
index 13980d9b8bcf..2cc41d17cf99 100644
--- a/arch/ia64/kvm/kvm_minstate.h
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -50,27 +50,18 @@
 
 #define PAL_VSA_SYNC_READ						\
 	/* begin to call pal vps sync_read */				\
+{.mii;									\
 	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
+	nop 0x0;							\
+	mov r24=ip;							\
 	;;								\
+}									\
+{.mmb									\
+	add r24=0x20, r24;						\
 	ld8 r25 = [r25];	/* read vpd base */			\
-	ld8 r20 = [r20];						\
-	;;								\
-	add r20 = PAL_VPS_SYNC_READ,r20;				\
-	;;								\
-{ .mii;									\
-	nop 0x0;							\
-	mov r24 = ip;							\
-	mov b0 = r20;							\
+	br.cond.sptk kvm_vps_sync_read;		/*call the service*/	\
 	;;								\
 };									\
-{ .mmb;									\
-	add r24 = 0x20, r24;						\
-	nop 0x0;							\
-	br.cond.sptk b0;	/*  call the service */			\
-	;;								\
-};
-
 
 
 #define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index e4f15d641b22..634abad979b5 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ * Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,98 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+#define VMX_VPS_SYNC_READ			\
+	add r16=VMM_VPD_BASE_OFFSET,r21;	\
+	mov r17 = b0;				\
+	mov r18 = r24;				\
+	mov r19 = r25;				\
+	mov r20 = r31;				\
+	;;					\
+{.mii;						\
+	ld8 r16 = [r16];			\
+	nop 0x0;				\
+	mov r24 = ip;				\
+	;;					\
+};						\
+{.mmb;						\
+	add r24=0x20, r24;			\
+	mov r25 =r16;				\
+	br.sptk.many kvm_vps_sync_read;		\
+};						\
+	mov b0 = r17;				\
+	mov r24 = r18;				\
+	mov r25 = r19;				\
+	mov r31 = r20
+
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1	// bit 63 of r27 indicate whether enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
+
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
@@ -157,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -196,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -212,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
 
 
@@ -221,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -271,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -286,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
 
 
@@ -295,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -374,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-(p6)	br.dpnt.few kvm_resume_to_guest
+(p6)	br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -389,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -435,25 +545,31 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
 	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
 	;;
-	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
+	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
 	ld8 r17=[r16]		// get PTA
 	mov r26=1
 	;;
 	extr.u r29=r17,2,6	// get pta.size
-	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
+	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
 	;;
-	extr.u r25=r25,2,6	// get rr.ps
+	mov b0=r24
+	//Fallback to C if pta.vf is set
+	tbit.nz p6,p0=r17, 8
+	;;
+(p6)	mov r24=EVENT_THASH
+(p6)	br.cond.dpnt.many kvm_virtualization_fault_back
+	extr.u r28=r28,2,6	// get rr.ps
 	shl r22=r26,r29		// 1UL << pta.size
 	;;
-	shr.u r23=r19,r25	// vaddr >> rr.ps
+	shr.u r23=r19,r28	// vaddr >> rr.ps
 	adds r26=3,r29		// pta.size + 3
 	shl r27=r17,3		// pta << 3
 	;;
 	shl r23=r23,3		// (vaddr >> rr.ps) << 3
 	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
 	movl r16=7<<61
 	;;
 	adds r22=-1,r22		// (1UL << pta.size) - 1
@@ -724,6 +840,29 @@ END(asm_mov_from_reg)
  * r31: pr
  * r24: b0
  */
+ENTRY(kvm_resume_to_guest_with_sync)
+	adds r19=VMM_VPD_BASE_OFFSET,r21
+	mov r16 = r31
+	mov r17 = r24
+	;;
+{.mii
+	ld8 r25 =[r19]
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 =0x20, r24
+	nop 0x0
+	br.sptk.many kvm_vps_sync_write
+}
+
+	mov r31 = r16
+	mov r24 =r17
+	;;
+	br.sptk.many kvm_resume_to_guest
+END(kvm_resume_to_guest_with_sync)
+
 ENTRY(kvm_resume_to_guest)
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
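All of the stubs added above (kvm_vps_sync_read/write, kvm_vps_resume_normal/handler) funnel into kvm_vps_entry, which adds the per-service offset passed in r30 to the vcpu's vsa_base and branches there. In rough C terms the dispatch looks like the model below (a sketch for orientation only, assuming the per-vcpu vsa_base field implied by VMM_VCPU_VSA_BASE_OFFSET; the real transfer is the register-convention branch shown above):

    /* C model of kvm_vps_entry's dispatch, not the actual mechanism. */
    typedef void (*vps_service_t)(void);

    static void vps_entry_model(unsigned long vsa_base,
                                unsigned long service_offset)
    {
            /* e.g. service_offset == PAL_VPS_SYNC_READ */
            vps_service_t svc = (vps_service_t)(vsa_base + service_offset);
            svc();
    }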
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 5a33f7ed29a0..3417783ae164 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -962,9 +962,9 @@ static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 void vmm_transition(struct kvm_vcpu *vcpu)
 {
 	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
 	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	kvm_do_resume_op(vcpu);
 }
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b0fcfb62c49e..341e3fee280c 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
 	trp->rid = rid;
 }
 
-extern u64 kvm_lookup_mpa(u64 gpfn);
-extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)			\
-	({					\
-		u64 pte, ret = 0;		\
-		pte = kvm_lookup_mpa(gpfn);	\
-		if (!(pte & GPFN_INV_MASK))	\
-			ret = pte & GPFN_IO_MASK;	\
-		ret;				\
-	})
+extern u64 kvm_get_mpt_entry(u64 gpfn);
 
+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64 pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
 #endif
-
 #define IA64_NO_FAULT	0
 #define IA64_FAULT	1
 
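Besides converting the macro to an inline, this changes the classification: a frame tagged GPFN_PHYS_MMIO (assigned-device passthrough MMIO) now reads as non-I/O here, so only emulated-device frames take the I/O path. A self-contained model of the new logic (the mask values below are symbolic stand-ins, not the kernel's real bit layout):

    #include <stdint.h>

    #define GPFN_INV_MASK   (1ULL << 63)    /* symbolic */
    #define GPFN_IO_MASK    (7ULL << 60)    /* symbolic */
    #define GPFN_PHYS_MMIO  (6ULL << 60)    /* symbolic */

    static uint64_t gpfn_is_io_model(uint64_t pte)
    {
            if (!(pte & GPFN_INV_MASK)) {
                    pte &= GPFN_IO_MASK;
                    if (pte != GPFN_PHYS_MMIO)
                            return pte;     /* emulated-device I/O type */
            }
            return 0;   /* RAM, invalid entry, or passthrough MMIO */
    }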
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ee5f481c06d..c1d7251a1480 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1261,11 +1261,6 @@ kvm_rse_clear_invalid:
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
 	ld8 r19=[r19]        //vpsr
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	ld8 r20=[r20]
-	;;
-//vsa_sync_write_start
 	mov r25=r18
 	adds r16= VMM_VCPU_GP_OFFSET,r21
 	;;
@@ -1274,10 +1269,7 @@ kvm_rse_clear_invalid:
 	;;
 	add r24=r24,r16
 	;;
-	add r16=PAL_VPS_SYNC_WRITE,r20
-	;;
-	mov b0=r16
-	br.cond.sptk b0		// call the service
+	br.sptk.many kvm_vps_sync_write		// call the service
 	;;
 END(ia64_leave_hypervisor)
 // fall through
@@ -1288,28 +1280,15 @@ GLOBAL_ENTRY(ia64_vmm_entry)
  *  r17:cr.isr
  *  r18:vpd
  *  r19:vpsr
- *  r20:__vsa_base
  *  r22:b0
 *  r23:predicate
 */
 	mov r24=r22
 	mov r25=r18
 	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
+(p1)	br.cond.sptk.few kvm_vps_resume_normal
+(p2)	br.cond.sptk.many kvm_vps_resume_handler
 	;;
-(p1)	add r29=PAL_VPS_RESUME_NORMAL,r20
-(p1)	br.sptk.many ia64_vmm_entry_out
-	;;
-	tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT	//p1=cr.isr.ir
-	;;
-(p1)	add r29=PAL_VPS_RESUME_NORMAL,r20
-(p2)	add r29=PAL_VPS_RESUME_HANDLER,r20
-(p2)	ld8 r26=[r25]
-	;;
-ia64_vmm_entry_out:
-	mov pr=r23,-2
-	mov b0=r29
-	;;
-	br.cond.sptk b0		// call pal service
 END(ia64_vmm_entry)
 
 
@@ -1376,6 +1355,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	//set up ipsr, iip, vpd.vpsr, dcr
 	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
 	// For DCR: all bits 0
+	bsw.0
+	;;
+	mov r21 =r13
 	adds r14=-VMM_PT_REGS_SIZE, r12
 	;;
 	movl r6=0x501008826000		// IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
@@ -1387,12 +1369,6 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	;;
 	srlz.i
 	;;
-	bsw.0
-	;;
-	mov r21 =r13
-	;;
-	bsw.1
-	;;
 	mov ar.rsc = 0
 	;;
 	flushrs
@@ -1406,12 +1382,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	ld8 r1 = [r20]
 	;;
 	mov cr.iip=r4
-	;;
 	adds r16=VMM_VPD_BASE_OFFSET,r13
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
 	;;
 	ld8 r18=[r16]
-	ld8 r20=[r20]
 	;;
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576d22b1..e22b93361e08 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+			io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
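The net effect in vtlb.c: the old kvm_lookup_mpa() is split into a raw P2M read (kvm_get_mpt_entry(), returning the entry with its flag bits intact) and a masked variant returning only the machine page-number bits, so callers like translate_phy_pte() can inspect the GPFN_IO_MASK flags before masking. A self-contained model of the split (table size and mask are symbolic, not the kernel's values):

    #include <stdint.h>

    #define PPN_MASK    0x0003fffffffff000ULL   /* symbolic PPN bits */

    static uint64_t p2m[1 << 16];   /* stand-in for the KVM_P2M_BASE table */

    static uint64_t get_mpt_entry(uint64_t gpfn)    /* raw: PPN + flags */
    {
            return p2m[gpfn];
    }

    static uint64_t lookup_mpa(uint64_t gpfn)       /* machine address bits */
    {
            return get_mpt_entry(gpfn) & PPN_MASK;
    }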