diff options
author | Christoffer Dall <c.dall@virtualopensystems.com> | 2013-01-20 18:28:10 -0500 |
---|---|---|
committer | Christoffer Dall <c.dall@virtualopensystems.com> | 2013-01-23 13:29:14 -0500 |
commit | 1138245ccf9652429630c09fb068e9b12c56c3d3 (patch) | |
tree | f4ef6b0be9f30ee57e7044a5311bf6487bb8bfa1 /arch/arm/kvm | |
parent | 5b3e5e5bf230f56309706dfc05fc0cb173cc83aa (diff) |
KVM: ARM: User space API for getting/setting co-proc registers
The following three ioctls are implemented:
- KVM_GET_REG_LIST
- KVM_GET_ONE_REG
- KVM_SET_ONE_REG
Now that we have a table for all the cp15 registers, we can drive a generic
API.
The register IDs carry the following encoding:
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
ARM 32-bit CP15 registers have the following id bit patterns:
0x4002 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
ARM 64-bit CP15 registers have the following id bit patterns:
0x4003 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
For futureproofing, we need to tell QEMU about the CP15 registers the
host lets the guest access.
It will need this information to restore a current guest on a future
CPU or perhaps a future KVM which allows some of these to be changed.
We use a separate table for these, as they're only for the userspace API.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r-- | arch/arm/kvm/coproc.c | 327 | ||||
-rw-r--r-- | arch/arm/kvm/guest.c | 9 |
2 files changed, 332 insertions, 4 deletions
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 722efe3b1675..95a0f5e5c1fc 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/uaccess.h> | ||
21 | #include <asm/kvm_arm.h> | 22 | #include <asm/kvm_arm.h> |
22 | #include <asm/kvm_host.h> | 23 | #include <asm/kvm_host.h> |
23 | #include <asm/kvm_emulate.h> | 24 | #include <asm/kvm_emulate.h> |
@@ -347,6 +348,328 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
347 | return emulate_cp15(vcpu, ¶ms); | 348 | return emulate_cp15(vcpu, ¶ms); |
348 | } | 349 | } |
349 | 350 | ||
351 | /****************************************************************************** | ||
352 | * Userspace API | ||
353 | *****************************************************************************/ | ||
354 | |||
355 | static bool index_to_params(u64 id, struct coproc_params *params) | ||
356 | { | ||
357 | switch (id & KVM_REG_SIZE_MASK) { | ||
358 | case KVM_REG_SIZE_U32: | ||
359 | /* Any unused index bits means it's not valid. */ | ||
360 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
361 | | KVM_REG_ARM_COPROC_MASK | ||
362 | | KVM_REG_ARM_32_CRN_MASK | ||
363 | | KVM_REG_ARM_CRM_MASK | ||
364 | | KVM_REG_ARM_OPC1_MASK | ||
365 | | KVM_REG_ARM_32_OPC2_MASK)) | ||
366 | return false; | ||
367 | |||
368 | params->is_64bit = false; | ||
369 | params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) | ||
370 | >> KVM_REG_ARM_32_CRN_SHIFT); | ||
371 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | ||
372 | >> KVM_REG_ARM_CRM_SHIFT); | ||
373 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | ||
374 | >> KVM_REG_ARM_OPC1_SHIFT); | ||
375 | params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) | ||
376 | >> KVM_REG_ARM_32_OPC2_SHIFT); | ||
377 | return true; | ||
378 | case KVM_REG_SIZE_U64: | ||
379 | /* Any unused index bits means it's not valid. */ | ||
380 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
381 | | KVM_REG_ARM_COPROC_MASK | ||
382 | | KVM_REG_ARM_CRM_MASK | ||
383 | | KVM_REG_ARM_OPC1_MASK)) | ||
384 | return false; | ||
385 | params->is_64bit = true; | ||
386 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | ||
387 | >> KVM_REG_ARM_CRM_SHIFT); | ||
388 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | ||
389 | >> KVM_REG_ARM_OPC1_SHIFT); | ||
390 | params->Op2 = 0; | ||
391 | params->CRn = 0; | ||
392 | return true; | ||
393 | default: | ||
394 | return false; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | /* Decode an index value, and find the cp15 coproc_reg entry. */ | ||
399 | static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, | ||
400 | u64 id) | ||
401 | { | ||
402 | size_t num; | ||
403 | const struct coproc_reg *table, *r; | ||
404 | struct coproc_params params; | ||
405 | |||
406 | /* We only do cp15 for now. */ | ||
407 | if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) | ||
408 | return NULL; | ||
409 | |||
410 | if (!index_to_params(id, ¶ms)) | ||
411 | return NULL; | ||
412 | |||
413 | table = get_target_table(vcpu->arch.target, &num); | ||
414 | r = find_reg(¶ms, table, num); | ||
415 | if (!r) | ||
416 | r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); | ||
417 | |||
418 | /* Not saved in the cp15 array? */ | ||
419 | if (r && !r->reg) | ||
420 | r = NULL; | ||
421 | |||
422 | return r; | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * These are the invariant cp15 registers: we let the guest see the host | ||
427 | * versions of these, so they're part of the guest state. | ||
428 | * | ||
429 | * A future CPU may provide a mechanism to present different values to | ||
430 | * the guest, or a future kvm may trap them. | ||
431 | */ | ||
432 | /* Unfortunately, there's no register-argument for mrc, so generate. */ | ||
433 | #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ | ||
434 | static void get_##name(struct kvm_vcpu *v, \ | ||
435 | const struct coproc_reg *r) \ | ||
436 | { \ | ||
437 | u32 val; \ | ||
438 | \ | ||
439 | asm volatile("mrc p15, " __stringify(op1) \ | ||
440 | ", %0, c" __stringify(crn) \ | ||
441 | ", c" __stringify(crm) \ | ||
442 | ", " __stringify(op2) "\n" : "=r" (val)); \ | ||
443 | ((struct coproc_reg *)r)->val = val; \ | ||
444 | } | ||
445 | |||
446 | FUNCTION_FOR32(0, 0, 0, 0, MIDR) | ||
447 | FUNCTION_FOR32(0, 0, 0, 1, CTR) | ||
448 | FUNCTION_FOR32(0, 0, 0, 2, TCMTR) | ||
449 | FUNCTION_FOR32(0, 0, 0, 3, TLBTR) | ||
450 | FUNCTION_FOR32(0, 0, 0, 6, REVIDR) | ||
451 | FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) | ||
452 | FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) | ||
453 | FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) | ||
454 | FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) | ||
455 | FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) | ||
456 | FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) | ||
457 | FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) | ||
458 | FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) | ||
459 | FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) | ||
460 | FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) | ||
461 | FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) | ||
462 | FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) | ||
463 | FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) | ||
464 | FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) | ||
465 | FUNCTION_FOR32(0, 0, 1, 1, CLIDR) | ||
466 | FUNCTION_FOR32(0, 0, 1, 7, AIDR) | ||
467 | |||
468 | /* ->val is filled in by kvm_invariant_coproc_table_init() */ | ||
469 | static struct coproc_reg invariant_cp15[] = { | ||
470 | { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, | ||
471 | { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, | ||
472 | { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, | ||
473 | { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, | ||
474 | { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, | ||
475 | |||
476 | { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, | ||
477 | { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, | ||
478 | { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, | ||
479 | { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, | ||
480 | { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, | ||
481 | { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, | ||
482 | { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, | ||
483 | { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, | ||
484 | |||
485 | { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, | ||
486 | { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, | ||
487 | { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, | ||
488 | { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, | ||
489 | { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, | ||
490 | { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, | ||
491 | |||
492 | { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, | ||
493 | { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, | ||
494 | }; | ||
495 | |||
496 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) | ||
497 | { | ||
498 | /* This Just Works because we are little endian. */ | ||
499 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) | ||
500 | return -EFAULT; | ||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) | ||
505 | { | ||
506 | /* This Just Works because we are little endian. */ | ||
507 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) | ||
508 | return -EFAULT; | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int get_invariant_cp15(u64 id, void __user *uaddr) | ||
513 | { | ||
514 | struct coproc_params params; | ||
515 | const struct coproc_reg *r; | ||
516 | |||
517 | if (!index_to_params(id, ¶ms)) | ||
518 | return -ENOENT; | ||
519 | |||
520 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | ||
521 | if (!r) | ||
522 | return -ENOENT; | ||
523 | |||
524 | return reg_to_user(uaddr, &r->val, id); | ||
525 | } | ||
526 | |||
527 | static int set_invariant_cp15(u64 id, void __user *uaddr) | ||
528 | { | ||
529 | struct coproc_params params; | ||
530 | const struct coproc_reg *r; | ||
531 | int err; | ||
532 | u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ | ||
533 | |||
534 | if (!index_to_params(id, ¶ms)) | ||
535 | return -ENOENT; | ||
536 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | ||
537 | if (!r) | ||
538 | return -ENOENT; | ||
539 | |||
540 | err = reg_from_user(&val, uaddr, id); | ||
541 | if (err) | ||
542 | return err; | ||
543 | |||
544 | /* This is what we mean by invariant: you can't change it. */ | ||
545 | if (r->val != val) | ||
546 | return -EINVAL; | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
552 | { | ||
553 | const struct coproc_reg *r; | ||
554 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
555 | |||
556 | r = index_to_coproc_reg(vcpu, reg->id); | ||
557 | if (!r) | ||
558 | return get_invariant_cp15(reg->id, uaddr); | ||
559 | |||
560 | /* Note: copies two regs if size is 64 bit. */ | ||
561 | return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); | ||
562 | } | ||
563 | |||
564 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
565 | { | ||
566 | const struct coproc_reg *r; | ||
567 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
568 | |||
569 | r = index_to_coproc_reg(vcpu, reg->id); | ||
570 | if (!r) | ||
571 | return set_invariant_cp15(reg->id, uaddr); | ||
572 | |||
573 | /* Note: copies two regs if size is 64 bit */ | ||
574 | return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); | ||
575 | } | ||
576 | |||
577 | static u64 cp15_to_index(const struct coproc_reg *reg) | ||
578 | { | ||
579 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); | ||
580 | if (reg->is_64) { | ||
581 | val |= KVM_REG_SIZE_U64; | ||
582 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | ||
583 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | ||
584 | } else { | ||
585 | val |= KVM_REG_SIZE_U32; | ||
586 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | ||
587 | val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); | ||
588 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | ||
589 | val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); | ||
590 | } | ||
591 | return val; | ||
592 | } | ||
593 | |||
594 | static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) | ||
595 | { | ||
596 | if (!*uind) | ||
597 | return true; | ||
598 | |||
599 | if (put_user(cp15_to_index(reg), *uind)) | ||
600 | return false; | ||
601 | |||
602 | (*uind)++; | ||
603 | return true; | ||
604 | } | ||
605 | |||
606 | /* Assumed ordered tables, see kvm_coproc_table_init. */ | ||
607 | static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) | ||
608 | { | ||
609 | const struct coproc_reg *i1, *i2, *end1, *end2; | ||
610 | unsigned int total = 0; | ||
611 | size_t num; | ||
612 | |||
613 | /* We check for duplicates here, to allow arch-specific overrides. */ | ||
614 | i1 = get_target_table(vcpu->arch.target, &num); | ||
615 | end1 = i1 + num; | ||
616 | i2 = cp15_regs; | ||
617 | end2 = cp15_regs + ARRAY_SIZE(cp15_regs); | ||
618 | |||
619 | BUG_ON(i1 == end1 || i2 == end2); | ||
620 | |||
621 | /* Walk carefully, as both tables may refer to the same register. */ | ||
622 | while (i1 || i2) { | ||
623 | int cmp = cmp_reg(i1, i2); | ||
624 | /* target-specific overrides generic entry. */ | ||
625 | if (cmp <= 0) { | ||
626 | /* Ignore registers we trap but don't save. */ | ||
627 | if (i1->reg) { | ||
628 | if (!copy_reg_to_user(i1, &uind)) | ||
629 | return -EFAULT; | ||
630 | total++; | ||
631 | } | ||
632 | } else { | ||
633 | /* Ignore registers we trap but don't save. */ | ||
634 | if (i2->reg) { | ||
635 | if (!copy_reg_to_user(i2, &uind)) | ||
636 | return -EFAULT; | ||
637 | total++; | ||
638 | } | ||
639 | } | ||
640 | |||
641 | if (cmp <= 0 && ++i1 == end1) | ||
642 | i1 = NULL; | ||
643 | if (cmp >= 0 && ++i2 == end2) | ||
644 | i2 = NULL; | ||
645 | } | ||
646 | return total; | ||
647 | } | ||
648 | |||
649 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) | ||
650 | { | ||
651 | return ARRAY_SIZE(invariant_cp15) | ||
652 | + walk_cp15(vcpu, (u64 __user *)NULL); | ||
653 | } | ||
654 | |||
655 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
656 | { | ||
657 | unsigned int i; | ||
658 | int err; | ||
659 | |||
660 | /* Then give them all the invariant registers' indices. */ | ||
661 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { | ||
662 | if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) | ||
663 | return -EFAULT; | ||
664 | uindices++; | ||
665 | } | ||
666 | |||
667 | err = walk_cp15(vcpu, uindices); | ||
668 | if (err > 0) | ||
669 | err = 0; | ||
670 | return err; | ||
671 | } | ||
672 | |||
350 | void kvm_coproc_table_init(void) | 673 | void kvm_coproc_table_init(void) |
351 | { | 674 | { |
352 | unsigned int i; | 675 | unsigned int i; |
@@ -354,6 +677,10 @@ void kvm_coproc_table_init(void) | |||
354 | /* Make sure tables are unique and in order. */ | 677 | /* Make sure tables are unique and in order. */ |
355 | for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) | 678 | for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) |
356 | BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); | 679 | BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); |
680 | |||
681 | /* We abuse the reset function to overwrite the table itself. */ | ||
682 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) | ||
683 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); | ||
357 | } | 684 | } |
358 | 685 | ||
359 | /** | 686 | /** |
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index a12eb229021d..2339d9609d36 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/kvm.h> | 26 | #include <asm/kvm.h> |
27 | #include <asm/kvm_asm.h> | 27 | #include <asm/kvm_asm.h> |
28 | #include <asm/kvm_emulate.h> | 28 | #include <asm/kvm_emulate.h> |
29 | #include <asm/kvm_coproc.h> | ||
29 | 30 | ||
30 | #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } | 31 | #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } |
31 | #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } | 32 | #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } |
@@ -119,7 +120,7 @@ static unsigned long num_core_regs(void) | |||
119 | */ | 120 | */ |
120 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | 121 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) |
121 | { | 122 | { |
122 | return num_core_regs(); | 123 | return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); |
123 | } | 124 | } |
124 | 125 | ||
125 | /** | 126 | /** |
@@ -138,7 +139,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |||
138 | uindices++; | 139 | uindices++; |
139 | } | 140 | } |
140 | 141 | ||
141 | return 0; | 142 | return kvm_arm_copy_coproc_indices(vcpu, uindices); |
142 | } | 143 | } |
143 | 144 | ||
144 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 145 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
@@ -151,7 +152,7 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
151 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 152 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) |
152 | return get_core_reg(vcpu, reg); | 153 | return get_core_reg(vcpu, reg); |
153 | 154 | ||
154 | return -EINVAL; | 155 | return kvm_arm_coproc_get_reg(vcpu, reg); |
155 | } | 156 | } |
156 | 157 | ||
157 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 158 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
@@ -164,7 +165,7 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
164 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 165 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) |
165 | return set_core_reg(vcpu, reg); | 166 | return set_core_reg(vcpu, reg); |
166 | 167 | ||
167 | return -EINVAL; | 168 | return kvm_arm_coproc_set_reg(vcpu, reg); |
168 | } | 169 | } |
169 | 170 | ||
170 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 171 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |