Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig    |   3
-rw-r--r--  virt/kvm/ioapic.c   |  10
-rw-r--r--  virt/kvm/ioapic.h   |   1
-rw-r--r--  virt/kvm/irq_comm.c |  14
-rw-r--r--  virt/kvm/kvm_main.c | 132
5 files changed, 112 insertions(+), 48 deletions(-)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0a5982..28694f4a9139 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
 
 config KVM_ASYNC_PF
         bool
+
+config HAVE_KVM_MSI
+        bool
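
An architecture opts in to the new ioctl by selecting this symbol from its own KVM entry. A minimal sketch of what that looks like (option body abridged; the exact name and dependencies vary per arch):

    config KVM
            tristate "Kernel-based Virtual Machine (KVM) support"
            select KVM_MMIO
            select HAVE_KVM_MSI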
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index dcaf272c26c0..26fd54dc459e 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -254,13 +254,17 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
         }
 }
 
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+        smp_rmb();
+        return test_bit(vector, ioapic->handled_vectors);
+}
+
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 {
         struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-        smp_rmb();
-        if (!test_bit(vector, ioapic->handled_vectors))
-                return;
         spin_lock(&ioapic->lock);
         __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
         spin_unlock(&ioapic->lock);
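
With the vector check split into its own helper, callers can take the EOI fast path without touching the ioapic lock. A hypothetical caller-side sketch (modeled on the lapic EOI path; vcpu, vector and trigger_mode are assumed from the surrounding function):

        /* Skip the locked slow path when the ioapic does not
         * handle this vector at all. */
        if (kvm_ioapic_handles_vector(vcpu->kvm, vector))
                kvm_ioapic_update_eoi(vcpu->kvm, vector, trigger_mode);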
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0b190c34ccc3..32872a09b63f 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,6 +71,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                 int short_hand, int dest, int dest_mode);
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4e365f..a6a0365475ed 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
         return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+        struct kvm_kernel_irq_routing_entry route;
+
+        if (!irqchip_in_kernel(kvm) || msi->flags != 0)
+                return -EINVAL;
+
+        route.msi.address_lo = msi->address_lo;
+        route.msi.address_hi = msi->address_hi;
+        route.msi.data = msi->data;
+
+        return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+}
+
 /*
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
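
kvm_send_userspace_msi() consumes the new uapi structure whose fields it copies into the routing entry. A sketch of the expected layout (field names taken from the code above; the flags-must-be-zero rule matches the -EINVAL check, while the padding size is an assumption):

    struct kvm_msi {
            __u32 address_lo;
            __u32 address_hi;
            __u32 data;
            __u32 flags;    /* must be zero, reserved for extensions */
            __u8  pad[16];  /* assumed reserved space */
    };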
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9739b533ca2e..7e140683ff14 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -522,12 +522,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
                 return;
 
         if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
-                vfree(memslot->dirty_bitmap_head);
+                vfree(memslot->dirty_bitmap);
         else
-                kfree(memslot->dirty_bitmap_head);
+                kfree(memslot->dirty_bitmap);
 
         memslot->dirty_bitmap = NULL;
-        memslot->dirty_bitmap_head = NULL;
 }
 
 /*
@@ -611,8 +610,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 
 /*
  * Allocation size is twice as large as the actual dirty bitmap size.
- * This makes it possible to do double buffering: see x86's
- * kvm_vm_ioctl_get_dirty_log().
+ * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
  */
 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
@@ -627,8 +625,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
         if (!memslot->dirty_bitmap)
                 return -ENOMEM;
 
-        memslot->dirty_bitmap_head = memslot->dirty_bitmap;
-        memslot->nr_dirty_pages = 0;
 #endif /* !CONFIG_S390 */
         return 0;
 }
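
The allocation stays at twice the nominal bitmap size; only the head pointer and dirty-page counter go away. A rough sketch of how the second half can serve as scratch space on the x86 GET_DIRTY_LOG path (a paraphrase, not the exact x86 code):

        unsigned long n = kvm_dirty_bitmap_bytes(memslot);
        unsigned long *scratch = memslot->dirty_bitmap + n / sizeof(long);

        /* Snapshot into the second half, hand the snapshot to
         * userspace, and let the live half keep collecting bits. */
        memcpy(scratch, memslot->dirty_bitmap, n);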
@@ -1477,8 +1473,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
         if (memslot && memslot->dirty_bitmap) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-                if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
-                        memslot->nr_dirty_pages++;
+                /* TODO: introduce set_bit_le() and use it */
+                test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
         }
 }
 
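
On the TODO above: the eventual helper would avoid the needless atomic read-back that test_and_set_bit_le() performs. A sketch of the shape such a helper could take (in the style of asm-generic/bitops/le.h; not part of this patch):

    static inline void set_bit_le(int nr, void *addr)
    {
            /* Same little-endian bit numbering as test_and_set_bit_le(),
             * minus the return value and its read-back cost. */
            set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
    }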
@@ -1515,6 +1511,30 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
         finish_wait(&vcpu->wq, &wait);
 }
 
+#ifndef CONFIG_S390
+/*
+ * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
+ */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+        int me;
+        int cpu = vcpu->cpu;
+        wait_queue_head_t *wqp;
+
+        wqp = kvm_arch_vcpu_wq(vcpu);
+        if (waitqueue_active(wqp)) {
+                wake_up_interruptible(wqp);
+                ++vcpu->stat.halt_wakeup;
+        }
+
+        me = get_cpu();
+        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+                if (kvm_arch_vcpu_should_kick(vcpu))
+                        smp_send_reschedule(cpu);
+        put_cpu();
+}
+#endif /* !CONFIG_S390 */
+
 void kvm_resched(struct kvm_vcpu *vcpu)
 {
         if (!need_resched())
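
kvm_vcpu_kick() defers the "is an IPI actually needed?" decision to the new per-arch hook. One plausible implementation, modeled on x86 where only a vcpu still running guest code needs the reschedule IPI (a sketch, assuming the x86 vcpu->mode machinery):

    bool kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
    {
            /* Atomically move the vcpu out of IN_GUEST_MODE; only a
             * vcpu that was still in guest mode needs the IPI. */
            return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
    }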
@@ -1523,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+        struct pid *pid;
+        struct task_struct *task = NULL;
+
+        rcu_read_lock();
+        pid = rcu_dereference(target->pid);
+        if (pid)
+                task = get_pid_task(target->pid, PIDTYPE_PID);
+        rcu_read_unlock();
+        if (!task)
+                return false;
+        if (task->flags & PF_VCPU) {
+                put_task_struct(task);
+                return false;
+        }
+        if (yield_to(task, 1)) {
+                put_task_struct(task);
+                return true;
+        }
+        put_task_struct(task);
+        return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
         struct kvm *kvm = me->kvm;
@@ -1541,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
          */
         for (pass = 0; pass < 2 && !yielded; pass++) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
-                        struct task_struct *task = NULL;
-                        struct pid *pid;
                         if (!pass && i < last_boosted_vcpu) {
                                 i = last_boosted_vcpu;
                                 continue;
@@ -1552,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                 continue;
                         if (waitqueue_active(&vcpu->wq))
                                 continue;
-                        rcu_read_lock();
-                        pid = rcu_dereference(vcpu->pid);
-                        if (pid)
-                                task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-                        rcu_read_unlock();
-                        if (!task)
-                                continue;
-                        if (task->flags & PF_VCPU) {
-                                put_task_struct(task);
-                                continue;
-                        }
-                        if (yield_to(task, 1)) {
-                                put_task_struct(task);
+                        if (kvm_vcpu_yield_to(vcpu)) {
                                 kvm->last_boosted_vcpu = i;
                                 yielded = 1;
                                 break;
                         }
-                        put_task_struct(task);
                 }
         }
 }
@@ -2040,6 +2070,17 @@ static long kvm_vm_ioctl(struct file *filp,
                 mutex_unlock(&kvm->lock);
                 break;
 #endif
+#ifdef CONFIG_HAVE_KVM_MSI
+        case KVM_SIGNAL_MSI: {
+                struct kvm_msi msi;
+
+                r = -EFAULT;
+                if (copy_from_user(&msi, argp, sizeof msi))
+                        goto out;
+                r = kvm_send_userspace_msi(kvm, &msi);
+                break;
+        }
+#endif
         default:
                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
                 if (r == -ENOTTY)
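
With the capability advertised below, userspace can probe for the feature and inject an MSI without first setting up a routing table. A hedged usage sketch (kvm_fd is the /dev/kvm fd, vm_fd the VM fd; the address/data values are illustrative x86 examples):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int inject_example_msi(int kvm_fd, int vm_fd)
    {
            struct kvm_msi msi = {
                    .address_lo = 0xfee00000u,  /* example MSI doorbell */
                    .data = 0x30,               /* example vector */
            };

            if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SIGNAL_MSI) <= 0)
                    return -1;  /* kernel lacks KVM_SIGNAL_MSI */
            return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
    }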
@@ -2168,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
         case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
         case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+        case KVM_CAP_SIGNAL_MSI:
+#endif
                 return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
         case KVM_CAP_IRQ_ROUTING:
@@ -2394,9 +2438,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
                           gpa_t addr, int len)
 {
-        if (bus->dev_count == NR_IOBUS_DEVS)
-                return -ENOSPC;
-
         bus->range[bus->dev_count++] = (struct kvm_io_range) {
                 .addr = addr,
                 .len = len,
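
The open-coded -ENOSPC check can go because the caller now allocates the bus with exactly one free slot and performs the limit check itself. This presumes range[] is a flexible array member sized at allocation time, roughly (a sketch of the assumed shape, not a verbatim quote of kvm_host.h):

    struct kvm_io_bus {
            int dev_count;
            /* Allocated with exactly dev_count entries. */
            struct kvm_io_range range[];
    };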
@@ -2496,12 +2537,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
         struct kvm_io_bus *new_bus, *bus;
 
         bus = kvm->buses[bus_idx];
-        if (bus->dev_count > NR_IOBUS_DEVS-1)
+        if (bus->dev_count > NR_IOBUS_DEVS - 1)
                 return -ENOSPC;
 
-        new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+        new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+                          sizeof(struct kvm_io_range)), GFP_KERNEL);
         if (!new_bus)
                 return -ENOMEM;
+        memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+               sizeof(struct kvm_io_range)));
         kvm_io_bus_insert_dev(new_bus, dev, addr, len);
         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
         synchronize_srcu_expedited(&kvm->srcu);
@@ -2518,27 +2562,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
         struct kvm_io_bus *new_bus, *bus;
 
         bus = kvm->buses[bus_idx];
-
-        new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-        if (!new_bus)
-                return -ENOMEM;
-
         r = -ENOENT;
-        for (i = 0; i < new_bus->dev_count; i++)
-                if (new_bus->range[i].dev == dev) {
+        for (i = 0; i < bus->dev_count; i++)
+                if (bus->range[i].dev == dev) {
                         r = 0;
-                        new_bus->dev_count--;
-                        new_bus->range[i] = new_bus->range[new_bus->dev_count];
-                        sort(new_bus->range, new_bus->dev_count,
-                             sizeof(struct kvm_io_range),
-                             kvm_io_bus_sort_cmp, NULL);
                         break;
                 }
 
-        if (r) {
-                kfree(new_bus);
+        if (r)
                 return r;
-        }
+
+        new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+                          sizeof(struct kvm_io_range)), GFP_KERNEL);
+        if (!new_bus)
+                return -ENOMEM;
+
+        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+        new_bus->dev_count--;
+        memcpy(new_bus->range + i, bus->range + i + 1,
+               (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
         synchronize_srcu_expedited(&kvm->srcu);
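
Register and unregister now build a right-sized copy and publish it with rcu_assign_pointer() instead of editing the live array; because removal splices the entry out in place, the array stays sorted and the old sort()-on-delete disappears. The copy-then-publish dance exists for the lookup side, which walks the bus under SRCU, roughly like this (a sketch; the real kvm_io_bus_write()/read() carry more logic around it):

        struct kvm_io_bus *bus;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        /* Readers see either the old or the new array as a whole,
         * never a half-edited one. */
        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
        /* ... search bus->range[] for addr/len and dispatch ... */
        srcu_read_unlock(&kvm->srcu, idx);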