author		Christian Borntraeger <borntraeger@de.ibm.com>	2015-07-22 09:52:10 -0400
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-07-29 05:02:36 -0400
commit		c92ea7b9f7d256cabf7ee08a7627a5227e356dec (patch)
tree		422a4fa2cd8bed59bed081722d86e6af37db18a2 /arch/s390/kvm
parent		78f2613168eca83a218272aa12b680a365ee58d6 (diff)
KVM: s390: log capability enablement and vm attribute changes
Depending on user space, some capabilities and vm attributes are enabled at runtime. Let's log those events and while we're at it, log querying the vm attributes as well.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
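The VM_EVENT() calls added by this patch go to KVM's per-VM s390 debug feature rather than to the kernel log. As a rough sketch (based on the helper's definition in arch/s390/kvm/kvm-s390.h of this era, not part of this patch), the macro boils down to:

/*
 * Sketch of the per-VM logging helper used below; the real definition lives
 * in arch/s390/kvm/kvm-s390.h. It writes one formatted record into the VM's
 * s390 debug feature area (kvm->arch.dbf) at the given log level.
 */
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)		\
do {									\
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel,		\
			    d_string "\n", d_args);			\
} while (0)

The new messages therefore end up in the per-VM trace (typically visible under /sys/kernel/debug/s390dbf/kvm-<pid>/sprintf once debugfs is mounted) at log level 3 and above, without adding noise to dmesg.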
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	19
1 file changed, 19 insertions, 0 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 762103653a29..924b1ae86caf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -299,10 +299,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 
 	switch (cap->cap) {
 	case KVM_CAP_S390_IRQCHIP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
 		kvm->arch.use_irqchip = 1;
 		r = 0;
 		break;
 	case KVM_CAP_S390_USER_SIGP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
 		kvm->arch.user_sigp = 1;
 		r = 0;
 		break;
@@ -313,8 +315,11 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 			r = 0;
 		} else
 			r = -EINVAL;
+		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
+			 r ? "(not available)" : "(success)");
 		break;
 	case KVM_CAP_S390_USER_STSI:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
 		kvm->arch.user_stsi = 1;
 		r = 0;
 		break;
@@ -332,6 +337,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_LIMIT_SIZE:
 		ret = 0;
+		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
+			 kvm->arch.gmap->asce_end);
 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
 			ret = -EFAULT;
 		break;
@@ -354,6 +361,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 			break;
 
 		ret = -EBUSY;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
 		mutex_lock(&kvm->lock);
 		if (atomic_read(&kvm->online_vcpus) == 0) {
 			kvm->arch.use_cmma = 1;
@@ -366,6 +374,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 		if (!kvm->arch.use_cmma)
 			break;
 
+		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
 		mutex_lock(&kvm->lock);
 		idx = srcu_read_lock(&kvm->srcu);
 		s390_reset_cmma(kvm->arch.gmap->mm);
@@ -401,6 +410,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 			}
 		}
 		mutex_unlock(&kvm->lock);
+		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
 		break;
 	}
 	default:
@@ -427,22 +437,26 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 		kvm->arch.crypto.aes_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
 		get_random_bytes(
 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 		kvm->arch.crypto.dea_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
 		kvm->arch.crypto.aes_kw = 0;
 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 		kvm->arch.crypto.dea_kw = 0;
 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
 		break;
 	default:
 		mutex_unlock(&kvm->lock);
@@ -467,6 +481,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	if (gtod_high != 0)
 		return -EINVAL;
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -492,6 +507,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
 	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
 	return 0;
 }
 
@@ -523,6 +539,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
 			 sizeof(gtod_high)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -539,6 +556,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 	gtod = host_tod + kvm->arch.epoch;
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
 
 	return 0;
 }
@@ -2360,6 +2378,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	case KVM_CAP_S390_CSS_SUPPORT:
 		if (!vcpu->kvm->arch.css_support) {
 			vcpu->kvm->arch.css_support = 1;
+			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
 			trace_kvm_s390_enable_css(vcpu->kvm);
 		}
 		r = 0;