Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c	194
1 files changed, 194 insertions, 0 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cb24defdb6d9..358dd406fe21 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -743,6 +743,196 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	return 0;
 }
 
+/* iommu interrupt handling. Most of it is MSI-like. */
+
+static char *fault_reason_strings[] =
+{
+	"Software",
+	"Present bit in root entry is clear",
+	"Present bit in context entry is clear",
+	"Invalid context entry",
+	"Access beyond MGAW",
+	"PTE Write access is not set",
+	"PTE Read access is not set",
+	"Next page table ptr is invalid",
+	"Root table address invalid",
+	"Context table ptr is invalid",
+	"non-zero reserved fields in RTP",
+	"non-zero reserved fields in CTP",
+	"non-zero reserved fields in PTE",
+	"Unknown"
+};
+#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
+
+char *dmar_get_fault_reason(u8 fault_reason)
+{
+	if (fault_reason > MAX_FAULT_REASON_IDX)
+		return fault_reason_strings[MAX_FAULT_REASON_IDX];
+	else
+		return fault_reason_strings[fault_reason];
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	/* unmask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(0, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+	unsigned long flag;
+	struct intel_iommu *iommu = get_irq_data(irq);
+
+	/* mask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
+		u8 fault_reason, u16 source_id, u64 addr)
+{
+	char *reason;
+
+	reason = dmar_get_fault_reason(fault_reason);
+
+	printk(KERN_ERR
+		"DMAR:[%s] Request device [%02x:%02x.%d] "
+		"fault addr %llx\n"
+		"DMAR:[fault reason %02d] %s\n",
+		(type ? "DMA Read" : "DMA Write"),
+		(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+	return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+static irqreturn_t iommu_page_fault(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+	int reg, fault_index;
+	u32 fault_status;
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/* TBD: ignore advanced fault log currently */
+	if (!(fault_status & DMA_FSTS_PPF))
+		goto clear_overflow;
+
+	fault_index = dma_fsts_fault_record_index(fault_status);
+	reg = cap_fault_reg_offset(iommu->cap);
+	while (1) {
+		u8 fault_reason;
+		u16 source_id;
+		u64 guest_addr;
+		int type;
+		u32 data;
+
+		/* highest 32 bits */
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 12);
+		if (!(data & DMA_FRCD_F))
+			break;
+
+		fault_reason = dma_frcd_fault_reason(data);
+		type = dma_frcd_type(data);
+
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 8);
+		source_id = dma_frcd_source_id(data);
+
+		guest_addr = dmar_readq(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN);
+		guest_addr = dma_frcd_page_addr(guest_addr);
+		/* clear the fault */
+		writel(DMA_FRCD_F, iommu->reg + reg +
+			fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+		iommu_page_fault_do_one(iommu, type, fault_reason,
+				source_id, guest_addr);
+
+		fault_index++;
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
+			fault_index = 0;
+		spin_lock_irqsave(&iommu->register_lock, flag);
+	}
+clear_overflow:
+	/* clear primary fault overflow */
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	if (fault_status & DMA_FSTS_PFO)
+		writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+	int irq, ret;
+
+	irq = create_irq();
+	if (!irq) {
+		printk(KERN_ERR "IOMMU: no free vectors\n");
+		return -EINVAL;
+	}
+
+	set_irq_data(irq, iommu);
+	iommu->irq = irq;
+
+	ret = arch_setup_dmar_msi(irq);
+	if (ret) {
+		set_irq_data(irq, NULL);
+		iommu->irq = 0;
+		destroy_irq(irq);
+		return ret;
+	}
+
+	/* Clear any pending faults before installing the handler */
+	iommu_page_fault(irq, iommu);
+
+	ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
+	if (ret)
+		printk(KERN_ERR "IOMMU: can't request irq\n");
+	return ret;
+}
+
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
 	unsigned long ndomains;
@@ -1490,6 +1680,10 @@ int __init init_dmars(void)
 
 		iommu_flush_write_buffer(iommu);
 
+		ret = dmar_set_interrupt(iommu);
+		if (ret)
+			goto error;
+
 		iommu_set_root_entry(iommu);
 
 		iommu_flush_context_global(iommu, 0);
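
Note on the fault-record decode in iommu_page_fault() above: each primary fault recording register is 16 bytes, and the handler reads the high dword at offset +12 (valid bit, request type, fault reason), the dword at +8 (requester source id), and the 64-bit faulting address at offset 0. The sketch below is a self-contained, userspace-only illustration of that decode; it is not part of the patch. The frcd_* helpers and bit positions here are assumptions based on the VT-d primary fault record layout that the kernel's dma_frcd_* macros are expected to implement, so treat the masks as illustrative rather than authoritative.

/*
 * Hypothetical decoder for one 16-byte DMAR primary fault record,
 * mirroring the reads done in iommu_page_fault() above.
 * Bit layout assumed from the VT-d fault record format.
 */
#include <stdio.h>
#include <stdint.h>

#define FRCD_F	(1u << 31)	/* fault record valid ("F") bit in dword 3 */

static inline int frcd_type(uint32_t hi)         { return (hi >> 30) & 1; }  /* 1 = DMA read, 0 = DMA write */
static inline int frcd_fault_reason(uint32_t hi) { return hi & 0xff; }
static inline int frcd_source_id(uint32_t lo)    { return lo & 0xffff; }     /* bus:devfn of the requester */
static inline uint64_t frcd_page_addr(uint64_t a){ return a & ~0xfffULL; }   /* 4K-aligned fault address */

int main(void)
{
	/* Pretend these were read at +12, +8 and +0 of one fault record. */
	uint32_t dword3 = FRCD_F | (1u << 30) | 0x05;       /* valid, DMA read, reason 5 */
	uint32_t dword2 = (0x00 << 8) | (0x1d << 3) | 0x2;  /* bus 0, slot 0x1d, func 2 */
	uint64_t addr   = 0x12345678ULL;

	if (!(dword3 & FRCD_F))
		return 0;	/* no fault recorded in this slot */

	printf("type=%s reason=%d dev=%02x:%02x.%d addr=%llx\n",
	       frcd_type(dword3) ? "DMA Read" : "DMA Write",
	       frcd_fault_reason(dword3),
	       frcd_source_id(dword2) >> 8,
	       (frcd_source_id(dword2) >> 3) & 0x1f,
	       frcd_source_id(dword2) & 0x7,
	       (unsigned long long)frcd_page_addr(addr));
	return 0;
}

The bus/slot/function split matches how iommu_page_fault_do_one() formats source_id with PCI_SLOT() and PCI_FUNC(); compiling and running this with any C99 compiler prints the same information the kernel's printk would report for such a fault.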