diff options
author | Gregory Haskins <ghaskins@novell.com> | 2007-05-31 14:08:53 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-07-16 05:05:47 -0400 |
commit | 2eeb2e94eb6232f0895da696c10e6636093ff72b (patch) | |
tree | 2b60e438899054f50ce0e93c33a7dcfee4dc5edc /drivers/kvm/kvm_main.c | |
parent | d9413cd757a7c96d97ddb46ab4e3e04760ae4c55 (diff) |
KVM: Adds support for in-kernel mmio handlers
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r-- | drivers/kvm/kvm_main.c | 94 |
1 file changed, 82 insertions, 12 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 633c2eded08d..e157e282fcff 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c | |||
@@ -366,6 +366,7 @@ static struct kvm *kvm_create_vm(void) | |||
366 | spin_lock(&kvm_lock); | 366 | spin_lock(&kvm_lock); |
367 | list_add(&kvm->vm_list, &vm_list); | 367 | list_add(&kvm->vm_list, &vm_list); |
368 | spin_unlock(&kvm_lock); | 368 | spin_unlock(&kvm_lock); |
369 | kvm_io_bus_init(&kvm->mmio_bus); | ||
369 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 370 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
370 | struct kvm_vcpu *vcpu = &kvm->vcpus[i]; | 371 | struct kvm_vcpu *vcpu = &kvm->vcpus[i]; |
371 | 372 | ||
@@ -474,6 +475,7 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
474 | spin_lock(&kvm_lock); | 475 | spin_lock(&kvm_lock); |
475 | list_del(&kvm->vm_list); | 476 | list_del(&kvm->vm_list); |
476 | spin_unlock(&kvm_lock); | 477 | spin_unlock(&kvm_lock); |
478 | kvm_io_bus_destroy(&kvm->mmio_bus); | ||
477 | kvm_free_vcpus(kvm); | 479 | kvm_free_vcpus(kvm); |
478 | kvm_free_physmem(kvm); | 480 | kvm_free_physmem(kvm); |
479 | kfree(kvm); | 481 | kfree(kvm); |
@@ -1097,12 +1099,25 @@ static int emulator_write_std(unsigned long addr, | |||
1097 | return X86EMUL_UNHANDLEABLE; | 1099 | return X86EMUL_UNHANDLEABLE; |
1098 | } | 1100 | } |
1099 | 1101 | ||
1102 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | ||
1103 | gpa_t addr) | ||
1104 | { | ||
1105 | /* | ||
1106 | * Note that its important to have this wrapper function because | ||
1107 | * in the very near future we will be checking for MMIOs against | ||
1108 | * the LAPIC as well as the general MMIO bus | ||
1109 | */ | ||
1110 | return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); | ||
1111 | } | ||
1112 | |||
1100 | static int emulator_read_emulated(unsigned long addr, | 1113 | static int emulator_read_emulated(unsigned long addr, |
1101 | void *val, | 1114 | void *val, |
1102 | unsigned int bytes, | 1115 | unsigned int bytes, |
1103 | struct x86_emulate_ctxt *ctxt) | 1116 | struct x86_emulate_ctxt *ctxt) |
1104 | { | 1117 | { |
1105 | struct kvm_vcpu *vcpu = ctxt->vcpu; | 1118 | struct kvm_vcpu *vcpu = ctxt->vcpu; |
1119 | struct kvm_io_device *mmio_dev; | ||
1120 | gpa_t gpa; | ||
1106 | 1121 | ||
1107 | if (vcpu->mmio_read_completed) { | 1122 | if (vcpu->mmio_read_completed) { |
1108 | memcpy(val, vcpu->mmio_data, bytes); | 1123 | memcpy(val, vcpu->mmio_data, bytes); |
@@ -1111,18 +1126,26 @@ static int emulator_read_emulated(unsigned long addr, | |||
1111 | } else if (emulator_read_std(addr, val, bytes, ctxt) | 1126 | } else if (emulator_read_std(addr, val, bytes, ctxt) |
1112 | == X86EMUL_CONTINUE) | 1127 | == X86EMUL_CONTINUE) |
1113 | return X86EMUL_CONTINUE; | 1128 | return X86EMUL_CONTINUE; |
1114 | else { | ||
1115 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); | ||
1116 | 1129 | ||
1117 | if (gpa == UNMAPPED_GVA) | 1130 | gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); |
1118 | return X86EMUL_PROPAGATE_FAULT; | 1131 | if (gpa == UNMAPPED_GVA) |
1119 | vcpu->mmio_needed = 1; | 1132 | return X86EMUL_PROPAGATE_FAULT; |
1120 | vcpu->mmio_phys_addr = gpa; | ||
1121 | vcpu->mmio_size = bytes; | ||
1122 | vcpu->mmio_is_write = 0; | ||
1123 | 1133 | ||
1124 | return X86EMUL_UNHANDLEABLE; | 1134 | /* |
1135 | * Is this MMIO handled locally? | ||
1136 | */ | ||
1137 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); | ||
1138 | if (mmio_dev) { | ||
1139 | kvm_iodevice_read(mmio_dev, gpa, bytes, val); | ||
1140 | return X86EMUL_CONTINUE; | ||
1125 | } | 1141 | } |
1142 | |||
1143 | vcpu->mmio_needed = 1; | ||
1144 | vcpu->mmio_phys_addr = gpa; | ||
1145 | vcpu->mmio_size = bytes; | ||
1146 | vcpu->mmio_is_write = 0; | ||
1147 | |||
1148 | return X86EMUL_UNHANDLEABLE; | ||
1126 | } | 1149 | } |
1127 | 1150 | ||
1128 | static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | 1151 | static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, |
@@ -1150,8 +1173,9 @@ static int emulator_write_emulated(unsigned long addr, | |||
1150 | unsigned int bytes, | 1173 | unsigned int bytes, |
1151 | struct x86_emulate_ctxt *ctxt) | 1174 | struct x86_emulate_ctxt *ctxt) |
1152 | { | 1175 | { |
1153 | struct kvm_vcpu *vcpu = ctxt->vcpu; | 1176 | struct kvm_vcpu *vcpu = ctxt->vcpu; |
1154 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); | 1177 | struct kvm_io_device *mmio_dev; |
1178 | gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); | ||
1155 | 1179 | ||
1156 | if (gpa == UNMAPPED_GVA) { | 1180 | if (gpa == UNMAPPED_GVA) { |
1157 | kvm_arch_ops->inject_page_fault(vcpu, addr, 2); | 1181 | kvm_arch_ops->inject_page_fault(vcpu, addr, 2); |
@@ -1161,6 +1185,15 @@ static int emulator_write_emulated(unsigned long addr, | |||
1161 | if (emulator_write_phys(vcpu, gpa, val, bytes)) | 1185 | if (emulator_write_phys(vcpu, gpa, val, bytes)) |
1162 | return X86EMUL_CONTINUE; | 1186 | return X86EMUL_CONTINUE; |
1163 | 1187 | ||
1188 | /* | ||
1189 | * Is this MMIO handled locally? | ||
1190 | */ | ||
1191 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); | ||
1192 | if (mmio_dev) { | ||
1193 | kvm_iodevice_write(mmio_dev, gpa, bytes, val); | ||
1194 | return X86EMUL_CONTINUE; | ||
1195 | } | ||
1196 | |||
1164 | vcpu->mmio_needed = 1; | 1197 | vcpu->mmio_needed = 1; |
1165 | vcpu->mmio_phys_addr = gpa; | 1198 | vcpu->mmio_phys_addr = gpa; |
1166 | vcpu->mmio_size = bytes; | 1199 | vcpu->mmio_size = bytes; |
@@ -3031,6 +3064,43 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
3031 | return NOTIFY_OK; | 3064 | return NOTIFY_OK; |
3032 | } | 3065 | } |
3033 | 3066 | ||
3067 | void kvm_io_bus_init(struct kvm_io_bus *bus) | ||
3068 | { | ||
3069 | memset(bus, 0, sizeof(*bus)); | ||
3070 | } | ||
3071 | |||
3072 | void kvm_io_bus_destroy(struct kvm_io_bus *bus) | ||
3073 | { | ||
3074 | int i; | ||
3075 | |||
3076 | for (i = 0; i < bus->dev_count; i++) { | ||
3077 | struct kvm_io_device *pos = bus->devs[i]; | ||
3078 | |||
3079 | kvm_iodevice_destructor(pos); | ||
3080 | } | ||
3081 | } | ||
3082 | |||
3083 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) | ||
3084 | { | ||
3085 | int i; | ||
3086 | |||
3087 | for (i = 0; i < bus->dev_count; i++) { | ||
3088 | struct kvm_io_device *pos = bus->devs[i]; | ||
3089 | |||
3090 | if (pos->in_range(pos, addr)) | ||
3091 | return pos; | ||
3092 | } | ||
3093 | |||
3094 | return NULL; | ||
3095 | } | ||
3096 | |||
3097 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) | ||
3098 | { | ||
3099 | BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1)); | ||
3100 | |||
3101 | bus->devs[bus->dev_count++] = dev; | ||
3102 | } | ||
3103 | |||
3034 | static struct notifier_block kvm_cpu_notifier = { | 3104 | static struct notifier_block kvm_cpu_notifier = { |
3035 | .notifier_call = kvm_cpu_hotplug, | 3105 | .notifier_call = kvm_cpu_hotplug, |
3036 | .priority = 20, /* must be > scheduler priority */ | 3106 | .priority = 20, /* must be > scheduler priority */ |