-rw-r--r--	drivers/kvm/kvm.h	60
-rw-r--r--	drivers/kvm/kvm_main.c	94
2 files changed, 142 insertions, 12 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b08272bce213..31846b1c162f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -265,6 +265,65 @@ struct kvm_stat {
 	u32 efer_reload;
 };
 
+struct kvm_io_device {
+	void (*read)(struct kvm_io_device *this,
+		     gpa_t addr,
+		     int len,
+		     void *val);
+	void (*write)(struct kvm_io_device *this,
+		      gpa_t addr,
+		      int len,
+		      const void *val);
+	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+	void (*destructor)(struct kvm_io_device *this);
+
+	void *private;
+};
+
+static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+				     gpa_t addr,
+				     int len,
+				     void *val)
+{
+	dev->read(dev, addr, len, val);
+}
+
+static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+				      gpa_t addr,
+				      int len,
+				      const void *val)
+{
+	dev->write(dev, addr, len, val);
+}
+
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+{
+	return dev->in_range(dev, addr);
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+	dev->destructor(dev);
+}
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we don't expect many devices to register (famous last words :),
+ * so until then it will suffice.  At least it's abstracted so we can change
+ * it in one place.
+ */
+struct kvm_io_bus {
+	int dev_count;
+#define NR_IOBUS_DEVS 6
+	struct kvm_io_device *devs[NR_IOBUS_DEVS];
+};
+
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+			     struct kvm_io_device *dev);
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 	union {
@@ -393,6 +452,7 @@ struct kvm {
 	unsigned long rmap_overflow;
 	struct list_head vm_list;
 	struct file *filp;
+	struct kvm_io_bus mmio_bus;
 };
 
 struct descriptor_table {
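The hunks above only declare the callback interface and the bus type; they do not show what a device implementation looks like. A minimal sketch, assuming a hypothetical device (the dummy_* names and the fixed 4-byte register are inventions for illustration, not part of the patch; only the struct kvm_io_device interface comes from kvm.h above):

/*
 * Hypothetical example, not part of the patch: a trivial device that
 * claims one 4-byte guest-physical register and latches the last value
 * the guest wrote to it.
 */
struct dummy_mmio_dev {
	struct kvm_io_device dev;	/* callbacks + private pointer */
	gpa_t base;			/* guest-physical base address */
	u32 latch;			/* last value the guest wrote */
};

static void dummy_mmio_read(struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct dummy_mmio_dev *d = this->private;
	int n = len < 4 ? len : 4;	/* cap at the register width */

	memcpy(val, &d->latch, n);
}

static void dummy_mmio_write(struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct dummy_mmio_dev *d = this->private;
	int n = len < 4 ? len : 4;

	memcpy(&d->latch, val, n);
}

static int dummy_mmio_in_range(struct kvm_io_device *this, gpa_t addr)
{
	struct dummy_mmio_dev *d = this->private;

	/* Claim exactly one 4-byte register at 'base'. */
	return addr >= d->base && addr < d->base + 4;
}

static void dummy_mmio_destructor(struct kvm_io_device *this)
{
	/* 'private' points back at the containing dummy_mmio_dev. */
	kfree(this->private);
}

How such a device gets created and attached to a VM's bus is sketched after the kvm_main.c hunks below.
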
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 633c2eded08d..e157e282fcff 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -366,6 +366,7 @@ static struct kvm *kvm_create_vm(void)
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
+	kvm_io_bus_init(&kvm->mmio_bus);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
@@ -474,6 +475,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
+	kvm_io_bus_destroy(&kvm->mmio_bus);
 	kvm_free_vcpus(kvm);
 	kvm_free_physmem(kvm);
 	kfree(kvm);
@@ -1097,12 +1099,25 @@ static int emulator_write_std(unsigned long addr,
 	return X86EMUL_UNHANDLEABLE;
 }
 
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+						gpa_t addr)
+{
+	/*
+	 * Note that it's important to have this wrapper function because
+	 * in the very near future we will be checking for MMIOs against
+	 * the LAPIC as well as the general MMIO bus.
+	 */
+	return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+}
+
 static int emulator_read_emulated(unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
 				  struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	struct kvm_io_device *mmio_dev;
+	gpa_t gpa;
 
 	if (vcpu->mmio_read_completed) {
 		memcpy(val, vcpu->mmio_data, bytes);
@@ -1111,18 +1126,26 @@ static int emulator_read_emulated(unsigned long addr,
 	} else if (emulator_read_std(addr, val, bytes, ctxt)
 		   == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
-	else {
-		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
-		if (gpa == UNMAPPED_GVA)
-			return X86EMUL_PROPAGATE_FAULT;
-		vcpu->mmio_needed = 1;
-		vcpu->mmio_phys_addr = gpa;
-		vcpu->mmio_size = bytes;
-		vcpu->mmio_is_write = 0;
+	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+	if (gpa == UNMAPPED_GVA)
+		return X86EMUL_PROPAGATE_FAULT;
 
-		return X86EMUL_UNHANDLEABLE;
+	/*
+	 * Is this MMIO handled locally?
+	 */
+	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+	if (mmio_dev) {
+		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+		return X86EMUL_CONTINUE;
 	}
+
+	vcpu->mmio_needed = 1;
+	vcpu->mmio_phys_addr = gpa;
+	vcpu->mmio_size = bytes;
+	vcpu->mmio_is_write = 0;
+
+	return X86EMUL_UNHANDLEABLE;
 }
 
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1150,8 +1173,9 @@ static int emulator_write_emulated(unsigned long addr,
 				   unsigned int bytes,
 				   struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
-	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+	struct kvm_io_device *mmio_dev;
+	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1161,6 +1185,15 @@ static int emulator_write_emulated(unsigned long addr,
 	if (emulator_write_phys(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
+	/*
+	 * Is this MMIO handled locally?
+	 */
+	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+	if (mmio_dev) {
+		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+		return X86EMUL_CONTINUE;
+	}
+
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
 	vcpu->mmio_size = bytes;
@@ -3031,6 +3064,43 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	return NOTIFY_OK;
 }
 
+void kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+	memset(bus, 0, sizeof(*bus));
+}
+
+void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+{
+	int i;
+
+	for (i = 0; i < bus->dev_count; i++) {
+		struct kvm_io_device *pos = bus->devs[i];
+
+		kvm_iodevice_destructor(pos);
+	}
+}
+
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+	int i;
+
+	for (i = 0; i < bus->dev_count; i++) {
+		struct kvm_io_device *pos = bus->devs[i];
+
+		if (pos->in_range(pos, addr))
+			return pos;
+	}
+
+	return NULL;
+}
+
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
+
+	bus->devs[bus->dev_count++] = dev;
+}
+
 static struct notifier_block kvm_cpu_notifier = {
 	.notifier_call = kvm_cpu_hotplug,
 	.priority = 20, /* must be > scheduler priority */
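
Tying the pieces together, here is a hypothetical creation helper for the dummy device sketched after the kvm.h hunk (illustration only, not part of the patch; kzalloc/GFP_KERNEL are the usual kernel allocation helpers, everything else comes from the code added above):

/* Hypothetical usage sketch -- mirrors the dispatch path added above. */
static int dummy_mmio_dev_create(struct kvm *kvm, gpa_t base)
{
	struct dummy_mmio_dev *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = base;
	d->dev.read = dummy_mmio_read;
	d->dev.write = dummy_mmio_write;
	d->dev.in_range = dummy_mmio_in_range;
	d->dev.destructor = dummy_mmio_destructor;
	d->dev.private = d;

	/* The bus holds at most NR_IOBUS_DEVS (6) devices; there is no unregister yet. */
	kvm_io_bus_register_dev(&kvm->mmio_bus, &d->dev);
	return 0;
}

With a device registered this way, a guest load from the claimed address never leaves the kernel: emulator_read_emulated() translates the GVA to a GPA, vcpu_find_mmio_dev() walks kvm->mmio_bus until dummy_mmio_in_range() claims the address, and kvm_iodevice_read() invokes dummy_mmio_read() directly instead of setting mmio_needed and exiting to userspace. On VM teardown, kvm_io_bus_destroy() calls each device's destructor, which is why the example frees itself there.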
