Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--   arch/s390/kvm/interrupt.c | 704
1 file changed, 641 insertions(+), 63 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5f79d2d79ca7..200a8f9390b6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
 /*
  * handling kvm guest interrupts
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2014
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -13,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
+#include <linux/mmu_context.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <asm/asm-offsets.h>
@@ -31,7 +32,7 @@ static int is_ioint(u64 type)
 	return ((type & 0xfffe0000u) != 0xfffe0000u);
 }
 
-static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
 }
@@ -78,11 +79,8 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 			return 1;
 		return 0;
 	case KVM_S390_INT_SERVICE:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
-			return 1;
-		return 0;
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			return 0;
@@ -117,14 +115,12 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
 	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
 	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
@@ -150,6 +146,8 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_EXTERNAL_CALL:
 	case KVM_S390_INT_EMERGENCY:
 	case KVM_S390_INT_SERVICE:
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
 	case KVM_S390_INT_VIRTIO:
 		if (psw_extint_disabled(vcpu))
 			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
@@ -223,6 +221,30 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		rc |= put_guest(vcpu, inti->ext.ext_params,
 				(u32 __user *)__LC_EXT_PARAMS);
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
+	case KVM_S390_INT_PFAULT_DONE:
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
+						 inti->ext.ext_params2);
+		rc = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *) __LC_EXT_PARAMS2);
+		break;
 	case KVM_S390_INT_VIRTIO:
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -357,7 +379,7 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
@@ -482,11 +504,26 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+	vcpu->preempted = true;
 	tasklet_schedule(&vcpu->arch.tasklet);
 
 	return HRTIMER_NORESTART;
 }
 
+void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
+
+	spin_lock_bh(&li->lock);
+	list_for_each_entry_safe(inti, n, &li->list, list) {
+		list_del(&inti->list);
+		kfree(inti);
+	}
+	atomic_set(&li->active, 0);
+	spin_unlock_bh(&li->lock);
+}
+
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -528,6 +565,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 		list_for_each_entry_safe(inti, n, &fi->list, list) {
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				list_del(&inti->list);
+				fi->irq_count--;
 				deliver = 1;
 				break;
 			}
@@ -583,6 +621,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
 			if ((inti->type == KVM_S390_MCHK) &&
 			    __interrupt_is_deliverable(vcpu, inti)) {
 				list_del(&inti->list);
+				fi->irq_count--;
 				deliver = 1;
 				break;
 			}
@@ -650,8 +689,10 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 			inti = iter;
 			break;
 		}
-	if (inti)
+	if (inti) {
 		list_del_init(&inti->list);
+		fi->irq_count--;
+	}
 	if (list_empty(&fi->list))
 		atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
@@ -659,53 +700,101 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 	return inti;
 }
 
-int kvm_s390_inject_vm(struct kvm *kvm,
-		       struct kvm_s390_interrupt *s390int)
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 {
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_float_interrupt *fi;
-	struct kvm_s390_interrupt_info *inti, *iter;
+	struct kvm_s390_interrupt_info *iter;
+	struct kvm_vcpu *dst_vcpu = NULL;
 	int sigcpu;
+	int rc = 0;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+		rc = -EINVAL;
+		goto unlock_fi;
+	}
+	fi->irq_count++;
+	if (!is_ioint(inti->type)) {
+		list_add_tail(&inti->list, &fi->list);
+	} else {
+		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+		/* Keep I/O interrupts sorted in isc order. */
+		list_for_each_entry(iter, &fi->list, list) {
+			if (!is_ioint(iter->type))
+				continue;
+			if (int_word_to_isc_bits(iter->io.io_int_word)
+			    <= isc_bits)
+				continue;
+			break;
+		}
+		list_add_tail(&inti->list, &iter->list);
+	}
+	atomic_set(&fi->active, 1);
+	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+	if (sigcpu == KVM_MAX_VCPUS) {
+		do {
+			sigcpu = fi->next_rr_cpu++;
+			if (sigcpu == KVM_MAX_VCPUS)
+				sigcpu = fi->next_rr_cpu = 0;
+		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
+	}
+	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+	li = &dst_vcpu->arch.local_int;
+	spin_lock_bh(&li->lock);
+	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
+	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
+	spin_unlock_bh(&li->lock);
+unlock_fi:
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+	return rc;
+}
+
+int kvm_s390_inject_vm(struct kvm *kvm,
+		       struct kvm_s390_interrupt *s390int)
+{
+	struct kvm_s390_interrupt_info *inti;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
 		return -ENOMEM;
 
-	switch (s390int->type) {
+	inti->type = s390int->type;
+	switch (inti->type) {
 	case KVM_S390_INT_VIRTIO:
 		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
 			 s390int->parm, s390int->parm64);
-		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_INT_SERVICE:
 		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
-		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
 		break;
-	case KVM_S390_PROGRAM_INT:
-	case KVM_S390_SIGP_STOP:
-	case KVM_S390_INT_EXTERNAL_CALL:
-	case KVM_S390_INT_EMERGENCY:
-		kfree(inti);
-		return -EINVAL;
+	case KVM_S390_INT_PFAULT_DONE:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_MCHK:
 		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
 			 s390int->parm64);
-		inti->type = s390int->type;
 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
 		inti->mchk.mcic = s390int->parm64;
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (s390int->type & IOINT_AI_MASK)
+		if (inti->type & IOINT_AI_MASK)
 			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
 		else
 			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
 				 s390int->type & IOINT_CSSID_MASK,
 				 s390int->type & IOINT_SSID_MASK,
 				 s390int->type & IOINT_SCHID_MASK);
-		inti->type = s390int->type;
 		inti->io.subchannel_id = s390int->parm >> 16;
 		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
 		inti->io.io_int_parm = s390int->parm64 >> 32;
@@ -718,43 +807,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
 				 2);
 
-	mutex_lock(&kvm->lock);
-	fi = &kvm->arch.float_int;
-	spin_lock(&fi->lock);
-	if (!is_ioint(inti->type))
-		list_add_tail(&inti->list, &fi->list);
-	else {
-		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
-
-		/* Keep I/O interrupts sorted in isc order. */
-		list_for_each_entry(iter, &fi->list, list) {
-			if (!is_ioint(iter->type))
-				continue;
-			if (int_word_to_isc_bits(iter->io.io_int_word)
-			    <= isc_bits)
-				continue;
-			break;
-		}
-		list_add_tail(&inti->list, &iter->list);
-	}
-	atomic_set(&fi->active, 1);
-	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-	if (sigcpu == KVM_MAX_VCPUS) {
-		do {
-			sigcpu = fi->next_rr_cpu++;
-			if (sigcpu == KVM_MAX_VCPUS)
-				sigcpu = fi->next_rr_cpu = 0;
-		} while (fi->local_int[sigcpu] == NULL);
-	}
-	li = fi->local_int[sigcpu];
-	spin_lock_bh(&li->lock);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	spin_unlock_bh(&li->lock);
-	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
-	return 0;
+	return __inject_vm(kvm, inti);
 }
 
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
@@ -814,6 +867,10 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		inti->type = s390int->type;
 		inti->mchk.mcic = s390int->parm64;
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		inti->type = s390int->type;
+		inti->ext.ext_params2 = s390int->parm64;
+		break;
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -837,7 +894,528 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
+	vcpu->preempted = true;
 	spin_unlock_bh(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
 }
+
+static void clear_floating_interrupts(struct kvm *kvm)
+{
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	list_for_each_entry_safe(inti, n, &fi->list, list) {
+		list_del(&inti->list);
+		kfree(inti);
+	}
+	fi->irq_count = 0;
+	atomic_set(&fi->active, 0);
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+}
+
+static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+				   u8 *addr)
+{
+	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+	struct kvm_s390_irq irq = {0};
+
+	irq.type = inti->type;
+	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
+	case KVM_S390_INT_VIRTIO:
+	case KVM_S390_INT_SERVICE:
+		irq.u.ext = inti->ext;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		irq.u.io = inti->io;
+		break;
+	case KVM_S390_MCHK:
+		irq.u.mchk = inti->mchk;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_to_user(uptr, &irq, sizeof(irq)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+{
+	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi;
+	int ret = 0;
+	int n = 0;
+
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+
+	list_for_each_entry(inti, &fi->list, list) {
+		if (len < sizeof(struct kvm_s390_irq)) {
+			/* signal userspace to try again */
+			ret = -ENOMEM;
+			break;
+		}
+		ret = copy_irq_to_user(inti, buf);
+		if (ret)
+			break;
+		buf += sizeof(struct kvm_s390_irq);
+		len -= sizeof(struct kvm_s390_irq);
+		n++;
+	}
+
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+
+	return ret < 0 ? ret : n;
+}
+
+static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r;
+
+	switch (attr->group) {
+	case KVM_DEV_FLIC_GET_ALL_IRQS:
+		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+					  attr->attr);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
+				     u64 addr)
+{
+	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+	void *target = NULL;
+	void __user *source;
+	u64 size;
+
+	if (get_user(inti->type, (u64 __user *)addr))
+		return -EFAULT;
+
+	switch (inti->type) {
+	case KVM_S390_INT_PFAULT_INIT:
+	case KVM_S390_INT_PFAULT_DONE:
+	case KVM_S390_INT_VIRTIO:
+	case KVM_S390_INT_SERVICE:
+		target = (void *) &inti->ext;
+		source = &uptr->u.ext;
+		size = sizeof(inti->ext);
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		target = (void *) &inti->io;
+		source = &uptr->u.io;
+		size = sizeof(inti->io);
+		break;
+	case KVM_S390_MCHK:
+		target = (void *) &inti->mchk;
+		source = &uptr->u.mchk;
+		size = sizeof(inti->mchk);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_from_user(target, source, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int enqueue_floating_irq(struct kvm_device *dev,
+				struct kvm_device_attr *attr)
+{
+	struct kvm_s390_interrupt_info *inti = NULL;
+	int r = 0;
+	int len = attr->attr;
+
+	if (len % sizeof(struct kvm_s390_irq) != 0)
+		return -EINVAL;
+	else if (len > KVM_S390_FLIC_MAX_BUFFER)
+		return -EINVAL;
+
+	while (len >= sizeof(struct kvm_s390_irq)) {
+		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+		if (!inti)
+			return -ENOMEM;
+
+		r = copy_irq_from_user(inti, attr->addr);
+		if (r) {
+			kfree(inti);
+			return r;
+		}
+		r = __inject_vm(dev->kvm, inti);
+		if (r) {
+			kfree(inti);
+			return r;
+		}
+		len -= sizeof(struct kvm_s390_irq);
+		attr->addr += sizeof(struct kvm_s390_irq);
+	}
+
+	return r;
+}
+
+static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
+{
+	if (id >= MAX_S390_IO_ADAPTERS)
+		return NULL;
+	return kvm->arch.adapters[id];
+}
+
+static int register_io_adapter(struct kvm_device *dev,
+			       struct kvm_device_attr *attr)
+{
+	struct s390_io_adapter *adapter;
+	struct kvm_s390_io_adapter adapter_info;
+
+	if (copy_from_user(&adapter_info,
+			   (void __user *)attr->addr, sizeof(adapter_info)))
+		return -EFAULT;
+
+	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
+	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+		return -EINVAL;
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&adapter->maps);
+	init_rwsem(&adapter->maps_lock);
+	atomic_set(&adapter->nr_maps, 0);
+	adapter->id = adapter_info.id;
+	adapter->isc = adapter_info.isc;
+	adapter->maskable = adapter_info.maskable;
+	adapter->masked = false;
+	adapter->swap = adapter_info.swap;
+	dev->kvm->arch.adapters[adapter->id] = adapter;
+
+	return 0;
+}
+
+int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
+{
+	int ret;
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+
+	if (!adapter || !adapter->maskable)
+		return -EINVAL;
+	ret = adapter->masked;
+	adapter->masked = masked;
+	return ret;
+}
+
+static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
+{
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+	struct s390_map_info *map;
+	int ret;
+
+	if (!adapter || !addr)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&map->list);
+	map->guest_addr = addr;
+	map->addr = gmap_translate(addr, kvm->arch.gmap);
+	if (map->addr == -EFAULT) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret != 1);
+	down_write(&adapter->maps_lock);
+	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
+		list_add_tail(&map->list, &adapter->maps);
+		ret = 0;
+	} else {
+		put_page(map->page);
+		ret = -EINVAL;
+	}
+	up_write(&adapter->maps_lock);
+out:
+	if (ret)
+		kfree(map);
+	return ret;
+}
+
+static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
+{
+	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+	struct s390_map_info *map, *tmp;
+	int found = 0;
+
+	if (!adapter || !addr)
+		return -EINVAL;
+
+	down_write(&adapter->maps_lock);
+	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
+		if (map->guest_addr == addr) {
+			found = 1;
+			atomic_dec(&adapter->nr_maps);
+			list_del(&map->list);
+			put_page(map->page);
+			kfree(map);
+			break;
+		}
+	}
+	up_write(&adapter->maps_lock);
+
+	return found ? 0 : -EINVAL;
+}
+
+void kvm_s390_destroy_adapters(struct kvm *kvm)
+{
+	int i;
+	struct s390_map_info *map, *tmp;
+
+	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
+		if (!kvm->arch.adapters[i])
+			continue;
+		list_for_each_entry_safe(map, tmp,
+					 &kvm->arch.adapters[i]->maps, list) {
+			list_del(&map->list);
+			put_page(map->page);
+			kfree(map);
+		}
+		kfree(kvm->arch.adapters[i]);
+	}
+}
+
+static int modify_io_adapter(struct kvm_device *dev,
+			     struct kvm_device_attr *attr)
+{
+	struct kvm_s390_io_adapter_req req;
+	struct s390_io_adapter *adapter;
+	int ret;
+
+	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
+		return -EFAULT;
+
+	adapter = get_io_adapter(dev->kvm, req.id);
+	if (!adapter)
+		return -EINVAL;
+	switch (req.type) {
+	case KVM_S390_IO_ADAPTER_MASK:
+		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
+		if (ret > 0)
+			ret = 0;
+		break;
+	case KVM_S390_IO_ADAPTER_MAP:
+		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
+		break;
+	case KVM_S390_IO_ADAPTER_UNMAP:
+		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r = 0;
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
+
+	switch (attr->group) {
+	case KVM_DEV_FLIC_ENQUEUE:
+		r = enqueue_floating_irq(dev, attr);
+		break;
+	case KVM_DEV_FLIC_CLEAR_IRQS:
+		r = 0;
+		clear_floating_interrupts(dev->kvm);
+		break;
+	case KVM_DEV_FLIC_APF_ENABLE:
+		dev->kvm->arch.gmap->pfault_enabled = 1;
+		break;
+	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+		dev->kvm->arch.gmap->pfault_enabled = 0;
+		/*
+		 * Make sure no async faults are in transition when
+		 * clearing the queues. So we don't need to worry
+		 * about late coming workers.
+		 */
+		synchronize_srcu(&dev->kvm->srcu);
+		kvm_for_each_vcpu(i, vcpu, dev->kvm)
+			kvm_clear_async_pf_completion_queue(vcpu);
+		break;
+	case KVM_DEV_FLIC_ADAPTER_REGISTER:
+		r = register_io_adapter(dev, attr);
+		break;
+	case KVM_DEV_FLIC_ADAPTER_MODIFY:
+		r = modify_io_adapter(dev, attr);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+static int flic_create(struct kvm_device *dev, u32 type)
+{
+	if (!dev)
+		return -EINVAL;
+	if (dev->kvm->arch.flic)
+		return -EINVAL;
+	dev->kvm->arch.flic = dev;
+	return 0;
+}
+
+static void flic_destroy(struct kvm_device *dev)
+{
+	dev->kvm->arch.flic = NULL;
+	kfree(dev);
+}
+
+/* s390 floating irq controller (flic) */
+struct kvm_device_ops kvm_flic_ops = {
+	.name = "kvm-flic",
+	.get_attr = flic_get_attr,
+	.set_attr = flic_set_attr,
+	.create = flic_create,
+	.destroy = flic_destroy,
+};
+
+static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
+{
+	unsigned long bit;
+
+	bit = bit_nr + (addr % PAGE_SIZE) * 8;
+
+	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
+}
+
+static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
+					  u64 addr)
+{
+	struct s390_map_info *map;
+
+	if (!adapter)
+		return NULL;
+
+	list_for_each_entry(map, &adapter->maps, list) {
+		if (map->guest_addr == addr)
+			return map;
+	}
+	return NULL;
+}
+
+static int adapter_indicators_set(struct kvm *kvm,
+				  struct s390_io_adapter *adapter,
+				  struct kvm_s390_adapter_int *adapter_int)
+{
+	unsigned long bit;
+	int summary_set, idx;
+	struct s390_map_info *info;
+	void *map;
+
+	info = get_map_info(adapter, adapter_int->ind_addr);
+	if (!info)
+		return -1;
+	map = page_address(info->page);
+	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
+	set_bit(bit, map);
+	idx = srcu_read_lock(&kvm->srcu);
+	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(info->page);
+	info = get_map_info(adapter, adapter_int->summary_addr);
+	if (!info) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -1;
+	}
+	map = page_address(info->page);
+	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
+			  adapter->swap);
+	summary_set = test_and_set_bit(bit, map);
+	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(info->page);
+	srcu_read_unlock(&kvm->srcu, idx);
+	return summary_set ? 0 : 1;
+}
+
+/*
+ * < 0 - not injected due to error
+ * = 0 - coalesced, summary indicator already active
+ * > 0 - injected interrupt
+ */
+static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
+			   struct kvm *kvm, int irq_source_id, int level,
+			   bool line_status)
+{
+	int ret;
+	struct s390_io_adapter *adapter;
+
+	/* We're only interested in the 0->1 transition. */
+	if (!level)
+		return 0;
+	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
+	if (!adapter)
+		return -1;
+	down_read(&adapter->maps_lock);
+	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
+	up_read(&adapter->maps_lock);
+	if ((ret > 0) && !adapter->masked) {
+		struct kvm_s390_interrupt s390int = {
+			.type = KVM_S390_INT_IO(1, 0, 0, 0),
+			.parm = 0,
+			.parm64 = (adapter->isc << 27) | 0x80000000,
+		};
+		ret = kvm_s390_inject_vm(kvm, &s390int);
+		if (ret == 0)
+			ret = 1;
+	}
+	return ret;
+}
+
+int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+			  struct kvm_kernel_irq_routing_entry *e,
+			  const struct kvm_irq_routing_entry *ue)
+{
+	int ret;
+
+	switch (ue->type) {
+	case KVM_IRQ_ROUTING_S390_ADAPTER:
+		e->set = set_adapter_int;
+		e->adapter.summary_addr = ue->u.adapter.summary_addr;
+		e->adapter.ind_addr = ue->u.adapter.ind_addr;
+		e->adapter.summary_offset = ue->u.adapter.summary_offset;
+		e->adapter.ind_offset = ue->u.adapter.ind_offset;
+		e->adapter.adapter_id = ue->u.adapter.adapter_id;
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+		int irq_source_id, int level, bool line_status)
+{
+	return -EINVAL;
+}