author	Marcelo Tosatti <mtosatti@redhat.com>	2007-12-20 19:18:22 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:20 -0500
commit	10589a4699bb978c781ce73bbae8ca942c5250c9
tree	5585ed87fff0a2ba259fcc6f998022481da75f68
parent	774ead3ad9bcbc05ef6aaebb9bdf8b4c3126923b
KVM: MMU: Concurrent guest walkers
Do not hold kvm->lock mutex across the entire pagefault code, only acquire it in places where it is necessary, such as mmu hash list, active list, rmap and parent pte handling.

Allow concurrent guest walkers by switching walk_addr() to use mmap_sem in read-mode.

And get rid of the lockless __gfn_to_page.

[avi: move kvm_mmu_pte_write() locking inside the function]
[avi: add locking for real mode]
[avi: fix cmpxchg locking]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
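In short, the patch splits the old coarse kvm->lock critical sections into two narrower disciplines: guest page-table walks and guest memory accesses run under mmap_sem held for read (so several vcpus may walk concurrently), ioctls that reshape guest memory take mmap_sem for write, and kvm->lock shrinks to the spots that truly need it, such as the in-kernel MMIO device lookup. A minimal sketch of the read-side pattern follows; my_emulate_mmio_read() is a hypothetical helper for illustration only, not code from this patch:

	/* Illustrative sketch of the locking discipline after this patch;
	 * not code from x86.c. */
	static int my_emulate_mmio_read(struct kvm_vcpu *vcpu, unsigned long addr,
					void *val, unsigned int bytes)
	{
		struct kvm_io_device *mmio_dev;
		gpa_t gpa;

		/* Translation walks the guest page tables: mmap_sem is
		 * taken for read, so concurrent walkers are allowed. */
		down_read(&current->mm->mmap_sem);
		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		up_read(&current->mm->mmap_sem);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;

		/* kvm->lock now covers only the MMIO device lookup and
		 * dispatch, not the whole emulation path. */
		mutex_lock(&vcpu->kvm->lock);
		mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
		if (mmio_dev)
			kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);

		return mmio_dev ? X86EMUL_CONTINUE : X86EMUL_UNHANDLEABLE;
	}

The write-mode takers in the diff below (kvm_vm_ioctl_set_nr_mmu_pages, set_memory_alias, get_dirty_log) exclude all readers, which is what lets them resize or zap MMU state while no vcpu is mid-walk.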
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	92
1 file changed, 55 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1f48ec871035..e3b3141db13c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -181,7 +181,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	mutex_lock(&vcpu->kvm->lock);
+	down_read(&current->mm->mmap_sem);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
 				  offset * sizeof(u64), sizeof(pdpte));
 	if (ret < 0) {
@@ -198,7 +198,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-	mutex_unlock(&vcpu->kvm->lock);
+	up_read(&current->mm->mmap_sem);
 
 	return ret;
 }
@@ -212,13 +212,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	mutex_lock(&vcpu->kvm->lock);
+	down_read(&current->mm->mmap_sem);
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-	mutex_unlock(&vcpu->kvm->lock);
+	up_read(&current->mm->mmap_sem);
 
 	return changed;
 }
@@ -278,9 +278,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
 
-	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
 	return;
 }
 EXPORT_SYMBOL_GPL(set_cr0);
@@ -320,9 +318,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
-	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr4);
 
@@ -360,7 +356,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 */
 	}
 
-	mutex_lock(&vcpu->kvm->lock);
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -376,7 +372,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		vcpu->arch.cr3 = cr3;
 		vcpu->arch.mmu.new_cr3(vcpu);
 	}
-	mutex_unlock(&vcpu->kvm->lock);
+	up_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -1211,12 +1207,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	down_write(&current->mm->mmap_sem);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-	mutex_unlock(&kvm->lock);
+	up_write(&current->mm->mmap_sem);
 	return 0;
 }
 
@@ -1265,7 +1261,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 	    < alias->target_phys_addr)
 		goto out;
 
-	mutex_lock(&kvm->lock);
+	down_write(&current->mm->mmap_sem);
 
 	p = &kvm->arch.aliases[alias->slot];
 	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1279,7 +1275,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
 	kvm_mmu_zap_all(kvm);
 
-	mutex_unlock(&kvm->lock);
+	up_write(&current->mm->mmap_sem);
 
 	return 0;
 
@@ -1355,7 +1351,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
-	mutex_lock(&kvm->lock);
+	down_write(&current->mm->mmap_sem);
 
 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 	if (r)
@@ -1371,7 +1367,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	}
 	r = 0;
 out:
-	mutex_unlock(&kvm->lock);
+	up_write(&current->mm->mmap_sem);
 	return r;
 }
 
@@ -1565,25 +1561,32 @@ int emulator_read_std(unsigned long addr,
 		      struct kvm_vcpu *vcpu)
 {
 	void *data = val;
+	int r = X86EMUL_CONTINUE;
 
+	down_read(&current->mm->mmap_sem);
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA)
-			return X86EMUL_PROPAGATE_FAULT;
+		if (gpa == UNMAPPED_GVA) {
+			r = X86EMUL_PROPAGATE_FAULT;
+			goto out;
+		}
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
-		if (ret < 0)
-			return X86EMUL_UNHANDLEABLE;
+		if (ret < 0) {
+			r = X86EMUL_UNHANDLEABLE;
+			goto out;
+		}
 
 		bytes -= tocopy;
 		data += tocopy;
 		addr += tocopy;
 	}
-
-	return X86EMUL_CONTINUE;
+out:
+	up_read(&current->mm->mmap_sem);
+	return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
 
@@ -1601,7 +1604,9 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
+	down_read(&current->mm->mmap_sem);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+	up_read(&current->mm->mmap_sem);
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1617,11 +1622,14 @@ mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
+	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
 	if (mmio_dev) {
 		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
+	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -1636,10 +1644,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	int ret;
 
+	down_read(&current->mm->mmap_sem);
 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
-	if (ret < 0)
+	if (ret < 0) {
+		up_read(&current->mm->mmap_sem);
 		return 0;
+	}
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+	up_read(&current->mm->mmap_sem);
 	return 1;
 }
 
@@ -1649,7 +1661,11 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 					   struct kvm_vcpu *vcpu)
 {
 	struct kvm_io_device *mmio_dev;
-	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+	gpa_t gpa;
+
+	down_read(&current->mm->mmap_sem);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+	up_read(&current->mm->mmap_sem);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_inject_page_fault(vcpu, addr, 2);
@@ -1667,11 +1683,14 @@ mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
+	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
 	if (mmio_dev) {
 		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
+	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -1718,11 +1737,14 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 #ifndef CONFIG_X86_64
 	/* guests cmpxchg8b have to be emulated atomically */
 	if (bytes == 8) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+		gpa_t gpa;
 		struct page *page;
 		char *addr;
 		u64 val;
 
+		down_read(&current->mm->mmap_sem);
+		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+
 		if (gpa == UNMAPPED_GVA ||
 		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 			goto emul_write;
@@ -1736,8 +1758,9 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		set_64bit((u64 *)(addr + offset_in_page(gpa)), val);
 		kunmap_atomic(addr, KM_USER0);
 		kvm_release_page_dirty(page);
+	emul_write:
+		up_read(&current->mm->mmap_sem);
 	}
-emul_write:
 #endif
 
 	return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -2118,10 +2141,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		kvm_x86_ops->skip_emulated_instruction(vcpu);
 
 	for (i = 0; i < nr_pages; ++i) {
-		mutex_lock(&vcpu->kvm->lock);
+		down_read(&current->mm->mmap_sem);
 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
 		vcpu->arch.pio.guest_pages[i] = page;
-		mutex_unlock(&vcpu->kvm->lock);
+		up_read(&current->mm->mmap_sem);
 		if (!page) {
 			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
@@ -2247,7 +2270,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 	char instruction[3];
 	int ret = 0;
 
-	mutex_lock(&vcpu->kvm->lock);
 
 	/*
 	 * Blow out the MMU to ensure that no other VCPU has an active mapping
@@ -2262,8 +2284,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 	    != X86EMUL_CONTINUE)
 		ret = -EFAULT;
 
-	mutex_unlock(&vcpu->kvm->lock);
-
 	return ret;
 }
 
@@ -2447,8 +2467,10 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 	if (!apic || !apic->vapic_addr)
 		return;
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
 	vcpu->arch.apic->vapic_page = page;
+	up_read(&current->mm->mmap_sem);
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -2910,13 +2932,13 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	gpa_t gpa;
 
 	vcpu_load(vcpu);
-	mutex_lock(&vcpu->kvm->lock);
+	down_read(&current->mm->mmap_sem);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+	up_read(&current->mm->mmap_sem);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
 	tr->writeable = 1;
 	tr->usermode = 0;
-	mutex_unlock(&vcpu->kvm->lock);
 	vcpu_put(vcpu);
 
 	return 0;
@@ -3185,13 +3207,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
-			down_write(&current->mm->mmap_sem);
 			memslot->userspace_addr = do_mmap(NULL, 0,
 						npages * PAGE_SIZE,
 						PROT_READ | PROT_WRITE,
 						MAP_SHARED | MAP_ANONYMOUS,
 						0);
-			up_write(&current->mm->mmap_sem);
 
 			if (IS_ERR((void *)memslot->userspace_addr))
 				return PTR_ERR((void *)memslot->userspace_addr);
@@ -3199,10 +3219,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		if (!old.user_alloc && old.rmap) {
 			int ret;
 
-			down_write(&current->mm->mmap_sem);
 			ret = do_munmap(current->mm, old.userspace_addr,
 					old.npages * PAGE_SIZE);
-			up_write(&current->mm->mmap_sem);
 			if (ret < 0)
 				printk(KERN_WARNING
 				       "kvm_vm_ioctl_set_memory_region: "