author		Joerg Roedel <jroedel@suse.de>	2016-09-05 06:43:16 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-09-05 06:43:16 -0400
commit		368d06cd536ecace3a904ae2e6e42771636d52aa (patch)
tree		b09c2b36487bb9af8a9b0587c63d8d012f7788ec /drivers/iommu/amd_iommu.c
parent		395adae45095e55ee90e688e9021d79dd6ffaefe (diff)
parent		d98de49a53e48f51332e97568127e722415e1232 (diff)
Merge branch 'x86/amd-avic' into x86/amd
Diffstat (limited to 'drivers/iommu/amd_iommu.c')

-rw-r--r--	drivers/iommu/amd_iommu.c	484
1 file changed, 427 insertions(+), 57 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2bcaca53aba5..bebe728019af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -137,6 +137,7 @@ struct iommu_dev_data {
 	bool pri_tlp;			  /* PASID TLB required for
 					     PPR completions */
 	u32 errata;			  /* Bitmap for errata to apply */
+	bool use_vapic;			  /* Enable device to use vapic mode */
 };
 
 /*
@@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 	}
 }
 
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(u32);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+	iommu_ga_log_notifier = notifier;
+
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+	u32 head, tail, cnt = 0;
+
+	if (iommu->ga_log == NULL)
+		return;
+
+	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+	while (head != tail) {
+		volatile u64 *raw;
+		u64 log_entry;
+
+		raw = (u64 *)(iommu->ga_log + head);
+		cnt++;
+
+		/* Avoid memcpy function-call overhead */
+		log_entry = *raw;
+
+		/* Update head pointer of hardware ring-buffer */
+		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+		/* Handle GA entry */
+		switch (GA_REQ_TYPE(log_entry)) {
+		case GA_GUEST_NR:
+			if (!iommu_ga_log_notifier)
+				break;
+
+			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
+				 __func__, GA_DEVID(log_entry),
+				 GA_TAG(log_entry));
+
+			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
+				pr_err("AMD-Vi: GA log notifier failed.\n");
+			break;
+		default:
+			break;
+		}
+	}
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define AMD_IOMMU_INT_MASK	\
+	(MMIO_STATUS_EVT_INT_MASK | \
+	 MMIO_STATUS_PPR_INT_MASK | \
+	 MMIO_STATUS_GALOG_INT_MASK)
+
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
 	struct amd_iommu *iommu = (struct amd_iommu *) data;
 	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
-		/* Enable EVT and PPR interrupts again */
-		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+	while (status & AMD_IOMMU_INT_MASK) {
+		/* Enable EVT and PPR and GA interrupts again */
+		writel(AMD_IOMMU_INT_MASK,
 			iommu->mmio_base + MMIO_STATUS_OFFSET);
 
 		if (status & MMIO_STATUS_EVT_INT_MASK) {
@@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
 			iommu_poll_ppr_log(iommu);
 		}
 
+#ifdef CONFIG_IRQ_REMAP
+		if (status & MMIO_STATUS_GALOG_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
+			iommu_poll_ga_log(iommu);
+		}
+#endif
+
 		/*
 		 * Hardware bug: ERBT1312
 		 * When re-enabling interrupt (by writing 1
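The GA ("guest virtual APIC") log drained by iommu_poll_ga_log() above is the channel through which the IOMMU reports interrupts that were posted to a vCPU which is not currently running; a hypervisor hooks it with the newly exported amd_iommu_register_ga_log_notifier(). A minimal, hypothetical consumer sketch follows: only the registration call comes from this merge, while the module scaffolding and the handler body are illustrative.

	#include <linux/module.h>
	#include <linux/amd-iommu.h>	/* assumed to carry the notifier prototype */

	/* Called from amd_iommu_int_thread() -> iommu_poll_ga_log() for each
	 * GA_GUEST_NR log entry; ga_tag identifies the target vCPU, which a
	 * real hypervisor would look up and wake here. */
	static int demo_ga_log_handler(u32 ga_tag)
	{
		pr_info("demo: wake request for ga_tag=%#x\n", ga_tag);
		return 0;	/* non-zero is reported as "GA log notifier failed" */
	}

	static int __init demo_init(void)
	{
		return amd_iommu_register_ga_log_notifier(demo_ga_log_handler);
	}

	static void __exit demo_exit(void)
	{
		amd_iommu_register_ga_log_notifier(NULL);	/* unhook */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");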
@@ -2948,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return;
 
+#ifdef CONFIG_IRQ_REMAP
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
+		dev_data->use_vapic = 0;
+#endif
+
 	iommu_completion_wait(iommu);
 }
 
@@ -2973,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 
 	ret = attach_device(dev, domain);
 
+#ifdef CONFIG_IRQ_REMAP
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+			dev_data->use_vapic = 1;
+		else
+			dev_data->use_vapic = 0;
+	}
+#endif
+
 	iommu_completion_wait(iommu);
 
 	return ret;
@@ -3511,34 +3594,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
  *
  *****************************************************************************/
 
-union irte {
-	u32 val;
-	struct {
-		u32 valid	: 1,
-		    no_fault	: 1,
-		    int_type	: 3,
-		    rq_eoi	: 1,
-		    dm		: 1,
-		    rsvd_1	: 1,
-		    destination	: 8,
-		    vector	: 8,
-		    rsvd_2	: 8;
-	} fields;
-};
-
-struct irq_2_irte {
-	u16 devid; /* Device ID for IRTE table */
-	u16 index; /* Index into IRTE table */
-};
-
-struct amd_ir_data {
-	struct irq_2_irte			irq_2_irte;
-	union irte				irte_entry;
-	union {
-		struct msi_msg			msi_entry;
-	};
-};
-
 static struct irq_chip amd_ir_chip;
 
 #define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
@@ -3560,8 +3615,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 	amd_iommu_dev_table[devid].data[2] = dte;
 }
 
-#define IRTE_ALLOCATED (~1U)
-
 static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 {
 	struct irq_remap_table *table = NULL;
@@ -3607,13 +3660,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 		goto out;
 	}
 
-	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
 
 	if (ioapic) {
 		int i;
 
 		for (i = 0; i < 32; ++i)
-			table->table[i] = IRTE_ALLOCATED;
+			iommu->irte_ops->set_allocated(table, i);
 	}
 
 	irq_lookup_table[devid] = table;
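The reworked memset above sizes the table by entry width: legacy interrupt remapping uses the 32-bit union irte, while GA mode uses a 128-bit struct irte_ga made of two u64 halves (defined elsewhere in this series), hence MAX_IRQS_PER_TABLE * sizeof(u64) * 2. A compile-time restatement of that size assumption, illustrative rather than part of the patch:

	/* The two IRTE widths get_irq_table() now has to handle. */
	BUILD_BUG_ON(sizeof(union irte) != sizeof(u32));
	BUILD_BUG_ON(sizeof(struct irte_ga) != 2 * sizeof(u64));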
@@ -3639,6 +3697,10 @@ static int alloc_irq_index(u16 devid, int count)
 	struct irq_remap_table *table;
 	unsigned long flags;
 	int index, c;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return -ENODEV;
 
 	table = get_irq_table(devid, false);
 	if (!table)
@@ -3650,14 +3712,14 @@ static int alloc_irq_index(u16 devid, int count)
 	for (c = 0, index = table->min_index;
 	     index < MAX_IRQS_PER_TABLE;
 	     ++index) {
-		if (table->table[index] == 0)
+		if (!iommu->irte_ops->is_allocated(table, index))
 			c += 1;
 		else
 			c = 0;
 
 		if (c == count) {
 			for (; c != 0; --c)
-				table->table[index - c + 1] = IRTE_ALLOCATED;
+				iommu->irte_ops->set_allocated(table, index - c + 1);
 
 			index -= count - 1;
 			goto out;
@@ -3672,7 +3734,42 @@ out:
 	return index;
 }
 
-static int modify_irte(u16 devid, int index, union irte irte)
+static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+			  struct amd_ir_data *data)
+{
+	struct irq_remap_table *table;
+	struct amd_iommu *iommu;
+	unsigned long flags;
+	struct irte_ga *entry;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu == NULL)
+		return -EINVAL;
+
+	table = get_irq_table(devid, false);
+	if (!table)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&table->lock, flags);
+
+	entry = (struct irte_ga *)table->table;
+	entry = &entry[index];
+	entry->lo.fields_remap.valid = 0;
+	entry->hi.val = irte->hi.val;
+	entry->lo.val = irte->lo.val;
+	entry->lo.fields_remap.valid = 1;
+	if (data)
+		data->ref = entry;
+
+	spin_unlock_irqrestore(&table->lock, flags);
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+
+	return 0;
+}
+
+static int modify_irte(u16 devid, int index, union irte *irte)
 {
 	struct irq_remap_table *table;
 	struct amd_iommu *iommu;
@@ -3687,7 +3784,7 @@ static int modify_irte(u16 devid, int index, union irte irte)
 		return -ENOMEM;
 
 	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = irte.val;
+	table->table[index] = irte->val;
 	spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
@@ -3711,13 +3808,146 @@ static void free_irte(u16 devid, int index)
 		return;
 
 	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = 0;
+	iommu->irte_ops->clear_allocated(table, index);
 	spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
 }
 
+static void irte_prepare(void *entry,
+			 u32 delivery_mode, u32 dest_mode,
+			 u8 vector, u32 dest_apicid, int devid)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->val = 0;
+	irte->fields.vector = vector;
+	irte->fields.int_type = delivery_mode;
+	irte->fields.destination = dest_apicid;
+	irte->fields.dm = dest_mode;
+	irte->fields.valid = 1;
+}
+
+static void irte_ga_prepare(void *entry,
+			    u32 delivery_mode, u32 dest_mode,
+			    u8 vector, u32 dest_apicid, int devid)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+	struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+	irte->lo.val = 0;
+	irte->hi.val = 0;
+	irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
+	irte->lo.fields_remap.int_type = delivery_mode;
+	irte->lo.fields_remap.dm = dest_mode;
+	irte->hi.fields.vector = vector;
+	irte->lo.fields_remap.destination = dest_apicid;
+	irte->lo.fields_remap.valid = 1;
+}
+
+static void irte_activate(void *entry, u16 devid, u16 index)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.valid = 1;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_activate(void *entry, u16 devid, u16 index)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+
+	irte->lo.fields_remap.valid = 1;
+	modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_deactivate(void *entry, u16 devid, u16 index)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.valid = 0;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+
+	irte->lo.fields_remap.valid = 0;
+	modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_set_affinity(void *entry, u16 devid, u16 index,
+			      u8 vector, u32 dest_apicid)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.vector = vector;
+	irte->fields.destination = dest_apicid;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+				 u8 vector, u32 dest_apicid)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+	struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+	if (!dev_data || !dev_data->use_vapic) {
+		irte->hi.fields.vector = vector;
+		irte->lo.fields_remap.destination = dest_apicid;
+		irte->lo.fields_remap.guest_mode = 0;
+		modify_irte_ga(devid, index, irte, NULL);
+	}
+}
+
+#define IRTE_ALLOCATED (~1U)
+static void irte_set_allocated(struct irq_remap_table *table, int index)
+{
+	table->table[index] = IRTE_ALLOCATED;
+}
+
+static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	memset(&irte->lo.val, 0, sizeof(u64));
+	memset(&irte->hi.val, 0, sizeof(u64));
+	irte->hi.fields.vector = 0xff;
+}
+
+static bool irte_is_allocated(struct irq_remap_table *table, int index)
+{
+	union irte *ptr = (union irte *)table->table;
+	union irte *irte = &ptr[index];
+
+	return irte->val != 0;
+}
+
+static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	return irte->hi.fields.vector != 0;
+}
+
+static void irte_clear_allocated(struct irq_remap_table *table, int index)
+{
+	table->table[index] = 0;
+}
+
+static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	memset(&irte->lo.val, 0, sizeof(u64));
+	memset(&irte->hi.val, 0, sizeof(u64));
+}
+
 static int get_devid(struct irq_alloc_info *info)
 {
 	int devid = -1;
@@ -3802,19 +4032,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
 {
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
 	struct msi_msg *msg = &data->msi_entry;
-	union irte *irte = &data->irte_entry;
 	struct IO_APIC_route_entry *entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return;
 
 	data->irq_2_irte.devid = devid;
 	data->irq_2_irte.index = index + sub_handle;
-
-	/* Setup IRTE for IOMMU */
-	irte->val = 0;
-	irte->fields.vector      = irq_cfg->vector;
-	irte->fields.int_type    = apic->irq_delivery_mode;
-	irte->fields.destination = irq_cfg->dest_apicid;
-	irte->fields.dm          = apic->irq_dest_mode;
-	irte->fields.valid       = 1;
+	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+				 apic->irq_dest_mode, irq_cfg->vector,
+				 irq_cfg->dest_apicid, devid);
 
 	switch (info->type) {
 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -3845,12 +4073,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
 	}
 }
 
+struct amd_irte_ops irte_32_ops = {
+	.prepare = irte_prepare,
+	.activate = irte_activate,
+	.deactivate = irte_deactivate,
+	.set_affinity = irte_set_affinity,
+	.set_allocated = irte_set_allocated,
+	.is_allocated = irte_is_allocated,
+	.clear_allocated = irte_clear_allocated,
+};
+
+struct amd_irte_ops irte_128_ops = {
+	.prepare = irte_ga_prepare,
+	.activate = irte_ga_activate,
+	.deactivate = irte_ga_deactivate,
+	.set_affinity = irte_ga_set_affinity,
+	.set_allocated = irte_ga_set_allocated,
+	.is_allocated = irte_ga_is_allocated,
+	.clear_allocated = irte_ga_clear_allocated,
+};
+
 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 			       unsigned int nr_irqs, void *arg)
 {
 	struct irq_alloc_info *info = arg;
 	struct irq_data *irq_data;
-	struct amd_ir_data *data;
+	struct amd_ir_data *data = NULL;
 	struct irq_cfg *cfg;
 	int i, ret, devid;
 	int index = -1;
@@ -3903,6 +4151,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		if (!data)
 			goto out_free_data;
 
+		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+		else
+			data->entry = kzalloc(sizeof(struct irte_ga),
+					      GFP_KERNEL);
+		if (!data->entry) {
+			kfree(data);
+			goto out_free_data;
+		}
+
 		irq_data->hwirq = (devid << 16) + i;
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
@@ -3939,6 +4197,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
 		data = irq_data->chip_data;
 		irte_info = &data->irq_2_irte;
 		free_irte(irte_info->devid, irte_info->index);
+		kfree(data->entry);
 		kfree(data);
 	}
 }
@@ -3950,8 +4209,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->activate(data->entry, irte_info->devid,
+					  irte_info->index);
 }
 
 static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -3959,10 +4221,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	union irte entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	entry.val = 0;
-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+					    irte_info->index);
 }
 
 static struct irq_domain_ops amd_ir_domain_ops = {
@@ -3972,6 +4235,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
 	.deactivate = irq_remapping_deactivate,
 };
 
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+	struct amd_iommu *iommu;
+	struct amd_iommu_pi_data *pi_data = vcpu_info;
+	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+	struct amd_ir_data *ir_data = data->chip_data;
+	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
+	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+
+	/* Note:
+	 * This device has never been set up for guest mode.
+	 * we should not modify the IRTE
+	 */
+	if (!dev_data || !dev_data->use_vapic)
+		return 0;
+
+	pi_data->ir_data = ir_data;
+
+	/* Note:
+	 * SVM tries to set up for VAPIC mode, but we are in
+	 * legacy mode. So, we force legacy mode instead.
+	 */
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
+			 __func__);
+		pi_data->is_guest_mode = false;
+	}
+
+	iommu = amd_iommu_rlookup_table[irte_info->devid];
+	if (iommu == NULL)
+		return -EINVAL;
+
+	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
+	if (pi_data->is_guest_mode) {
+		/* Setting */
+		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+		irte->hi.fields.vector = vcpu_pi_info->vector;
+		irte->lo.fields_vapic.guest_mode = 1;
+		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+
+		ir_data->cached_ga_tag = pi_data->ga_tag;
+	} else {
+		/* Un-Setting */
+		struct irq_cfg *cfg = irqd_cfg(data);
+
+		irte->hi.val = 0;
+		irte->lo.val = 0;
+		irte->hi.fields.vector = cfg->vector;
+		irte->lo.fields_remap.guest_mode = 0;
+		irte->lo.fields_remap.destination = cfg->dest_apicid;
+		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
+		irte->lo.fields_remap.dm = apic->irq_dest_mode;
+
+		/*
+		 * This communicates the ga_tag back to the caller
+		 * so that it can do all the necessary clean up.
+		 */
+		ir_data->cached_ga_tag = 0;
+	}
+
+	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+}
+
 static int amd_ir_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask, bool force)
 {
@@ -3979,8 +4306,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
 	struct irq_cfg *cfg = irqd_cfg(data);
 	struct irq_data *parent = data->parent_data;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 	int ret;
 
+	if (!iommu)
+		return -ENODEV;
+
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
@@ -3989,9 +4320,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	 * Atomically updates the IRTE with the new destination, vector
 	 * and flushes the interrupt entry cache.
 	 */
-	ir_data->irte_entry.fields.vector = cfg->vector;
-	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
-	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
+	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+			    irte_info->index, cfg->vector, cfg->dest_apicid);
 
 	/*
 	 * After this point, all the interrupts will start arriving
@@ -4013,6 +4343,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 static struct irq_chip amd_ir_chip = {
 	.irq_ack = ir_ack_apic_edge,
 	.irq_set_affinity = amd_ir_set_affinity,
+	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
 	.irq_compose_msi_msg = ir_compose_msi_msg,
 };
 
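The new .irq_set_vcpu_affinity callback is how a hypervisor flips an IRTE between ordinary remapping and guest ("posted") mode; it is normally reached through the generic irq_set_vcpu_affinity() helper with a struct amd_iommu_pi_data payload. A hypothetical setup sketch: the struct and the callback are from this merge, while the wrapper function, its parameters, and the tag/page values are placeholders.

	/* Route host_irq into posted mode for one vCPU (illustrative). */
	static int demo_enter_guest_mode(unsigned int host_irq, u32 ga_tag,
					 u64 vapic_backing_page_pa,
					 struct vcpu_data *vcpu)
	{
		struct amd_iommu_pi_data pi_data = {
			.ga_tag		= ga_tag,
			.base		= vapic_backing_page_pa,
			.is_guest_mode	= true,
			.vcpu_data	= vcpu,
		};

		/* Lands in amd_ir_set_vcpu_affinity(), which programs the
		 * 128-bit IRTE (ga_root_ptr, vector, ga_tag) or falls back
		 * to legacy remapping when AVIC is disabled. */
		return irq_set_vcpu_affinity(host_irq, &pi_data);
	}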
@@ -4027,4 +4358,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 	return 0;
 }
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct irq_remap_table *irt;
+	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+	int devid = ir_data->irq_2_irte.devid;
+	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
+
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+		return 0;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (!iommu)
+		return -ENODEV;
+
+	irt = get_irq_table(devid, false);
+	if (!irt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&irt->lock, flags);
+
+	if (ref->lo.fields_vapic.guest_mode) {
+		if (cpu >= 0)
+			ref->lo.fields_vapic.destination = cpu;
+		ref->lo.fields_vapic.is_run = is_run;
+		barrier();
+	}
+
+	spin_unlock_irqrestore(&irt->lock, flags);
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
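amd_iommu_update_ga() is the run-time companion to the setup path: on every vCPU sched-in and sched-out the hypervisor can update the destination CPU and is_run bit of the live IRTE (reached through ir_data->ref, which modify_irte_ga() cached), so the IOMMU either posts interrupts directly to the running vCPU or logs a GA event for the notifier instead. A hypothetical pair of scheduling hooks; only the exported call is from this merge, and 'ir_data' is the pointer handed back via amd_iommu_pi_data->ir_data:

	static void demo_vcpu_load(void *ir_data, int cpu)
	{
		/* vCPU now runs on 'cpu': post interrupts directly to it. */
		WARN_ON(amd_iommu_update_ga(cpu, true, ir_data));
	}

	static void demo_vcpu_put(void *ir_data)
	{
		/* vCPU scheduled out: a negative cpu keeps the previous
		 * destination and clears is_run, so further interrupts go
		 * through the GA log and the registered notifier instead. */
		WARN_ON(amd_iommu_update_ga(-1, false, ir_data));
	}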