-rw-r--r--   drivers/iommu/amd_iommu.c    |  11
-rw-r--r--   drivers/iommu/arm-smmu-v3.c  | 204
-rw-r--r--   drivers/iommu/arm-smmu.c     |  11
-rw-r--r--   drivers/iommu/dmar.c         |   3
-rw-r--r--   drivers/iommu/intel-svm.c    |   4
-rw-r--r--   drivers/iommu/ipmmu-vmsa.c   | 517
-rw-r--r--   drivers/iommu/mtk_iommu_v1.c |   2
-rw-r--r--   drivers/iommu/qcom_iommu.c   |  18
-rw-r--r--   include/linux/intel-iommu.h  |   1
9 files changed, 466 insertions(+), 305 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 9dc7facfd2e5..a8c111e96cc3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2382,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 			   size_t size,
 			   int dir)
 {
-	dma_addr_t flush_addr;
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	flush_addr = dma_addr;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
@@ -3153,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
 	unsigned long start, end;
 
 	start = IOVA_PFN(region->start);
-	end = IOVA_PFN(region->start + region->length);
+	end = IOVA_PFN(region->start + region->length - 1);
 
 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
@@ -3682,13 +3680,12 @@ static int alloc_irq_index(u16 devid, int count, bool align)
 
 	/* Scan table for free entries */
 	for (index = ALIGN(table->min_index, alignment), c = 0;
-	     index < MAX_IRQS_PER_TABLE;
-	     index++) {
+	     index < MAX_IRQS_PER_TABLE;) {
 		if (!iommu->irte_ops->is_allocated(table, index)) {
 			c += 1;
 		} else {
 			c = 0;
-			index = ALIGN(index, alignment);
+			index = ALIGN(index + 1, alignment);
 			continue;
 		}
 
@@ -3699,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count, bool align)
 			index -= count - 1;
 			goto out;
 		}
+
+		index++;
 	}
 
 	index = -ENOSPC;
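The alloc_irq_index() rework above is easier to follow outside the driver: the old loop advanced index on every iteration and re-aligned it without stepping past the busy slot, so the scan could revisit the same aligned entry instead of moving on. A minimal stand-alone sketch of the fixed scan logic follows (the boolean table and is_allocated() helper are illustrative stand-ins, not the driver's real IRTE structures):

#include <stdbool.h>

#define TABLE_SIZE	256
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Illustrative stand-in for iommu->irte_ops->is_allocated(table, index). */
static bool is_allocated(const bool *table, unsigned int index)
{
	return table[index];
}

/* Find 'count' consecutive free entries starting on an 'alignment' boundary. */
static int find_free_run(const bool *table, unsigned int count,
			 unsigned int alignment)
{
	unsigned int index, c;

	for (index = ALIGN(0, alignment), c = 0; index < TABLE_SIZE;) {
		if (!is_allocated(table, index)) {
			c += 1;
		} else {
			/* Busy slot: restart at the next aligned index after it. */
			c = 0;
			index = ALIGN(index + 1, alignment);
			continue;
		}

		if (c == count)
			return index - (count - 1);

		/* Only advance when the current slot was free. */
		index++;
	}

	return -1;
}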
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ee0c7b73cff7..f122071688fd 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -316,6 +316,7 @@
 #define ARM64_TCR_TBI0_MASK		0x1UL
 
 #define CTXDESC_CD_0_AA64		(1UL << 41)
+#define CTXDESC_CD_0_S			(1UL << 44)
 #define CTXDESC_CD_0_R			(1UL << 45)
 #define CTXDESC_CD_0_A			(1UL << 46)
 #define CTXDESC_CD_0_ASET_SHIFT		47
@@ -377,7 +378,16 @@
 
 #define CMDQ_SYNC_0_CS_SHIFT		12
 #define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_CS_IRQ		(1UL << CMDQ_SYNC_0_CS_SHIFT)
 #define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_MSH_SHIFT		22
+#define CMDQ_SYNC_0_MSH_ISH		(3UL << CMDQ_SYNC_0_MSH_SHIFT)
+#define CMDQ_SYNC_0_MSIATTR_SHIFT	24
+#define CMDQ_SYNC_0_MSIATTR_OIWB	(0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
+#define CMDQ_SYNC_0_MSIDATA_SHIFT	32
+#define CMDQ_SYNC_0_MSIDATA_MASK	0xffffffffUL
+#define CMDQ_SYNC_1_MSIADDR_SHIFT	0
+#define CMDQ_SYNC_1_MSIADDR_MASK	0xffffffffffffcUL
 
 /* Event queue */
 #define EVTQ_ENT_DWORDS			4
@@ -408,20 +418,12 @@
 
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US	100
-#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US	1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10
 
 #define MSI_IOVA_BASE			0x8000000
 #define MSI_IOVA_LENGTH			0x100000
 
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
-#define ACPI_IORT_SMMU_HISILICON_HI161X	0x1
-#endif
-
-#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
-#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX	0x2
-#endif
-
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -504,6 +506,10 @@ struct arm_smmu_cmdq_ent {
 		} pri;
 
 		#define CMDQ_OP_CMD_SYNC	0x46
+		struct {
+			u32			msidata;
+			u64			msiaddr;
+		} sync;
 	};
 };
 
@@ -604,6 +610,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
 #define ARM_SMMU_FEAT_STALLS		(1 << 11)
 #define ARM_SMMU_FEAT_HYP		(1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
 	u32				features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -616,6 +623,7 @@ struct arm_smmu_device {
 
 	int				gerr_irq;
 	int				combined_irq;
+	atomic_t			sync_nr;
 
 	unsigned long			ias; /* IPA */
 	unsigned long			oas; /* PA */
@@ -634,6 +642,8 @@ struct arm_smmu_device {
 
 	struct arm_smmu_strtab_cfg	strtab_cfg;
 
+	u32				sync_count;
+
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
 };
@@ -757,26 +767,29 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
  * Wait for the SMMU to consume items. If drain is true, wait until the queue
  * is empty. Otherwise, wait until there is at least one free slot.
  */
-static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
+static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
 {
 	ktime_t timeout;
-	unsigned int delay = 1;
+	unsigned int delay = 1, spin_cnt = 0;
 
-	/* Wait longer if it's queue drain */
-	timeout = ktime_add_us(ktime_get(), drain ?
-					    ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
+	/* Wait longer if it's a CMD_SYNC */
+	timeout = ktime_add_us(ktime_get(), sync ?
+					    ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
 					    ARM_SMMU_POLL_TIMEOUT_US);
 
-	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
+	while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
 		if (ktime_compare(ktime_get(), timeout) > 0)
 			return -ETIMEDOUT;
 
 		if (wfe) {
 			wfe();
-		} else {
+		} else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
 			cpu_relax();
+			continue;
+		} else {
 			udelay(delay);
 			delay *= 2;
+			spin_cnt = 0;
 		}
 	}
 
@@ -878,7 +891,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 		}
 		break;
 	case CMDQ_OP_CMD_SYNC:
-		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+		if (ent->sync.msiaddr)
+			cmd[0] |= CMDQ_SYNC_0_CS_IRQ;
+		else
+			cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+		cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB;
+		cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT;
+		cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
 		break;
 	default:
 		return -ENOENT;
@@ -936,13 +955,22 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+	struct arm_smmu_queue *q = &smmu->cmdq.q;
+	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+
+	while (queue_insert_raw(q, cmd) == -ENOSPC) {
+		if (queue_poll_cons(q, false, wfe))
+			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+	}
+}
+
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 				    struct arm_smmu_cmdq_ent *ent)
 {
 	u64 cmd[CMDQ_ENT_DWORDS];
 	unsigned long flags;
-	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-	struct arm_smmu_queue *q = &smmu->cmdq.q;
 
 	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
 		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
@@ -951,14 +979,76 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 	}
 
 	spin_lock_irqsave(&smmu->cmdq.lock, flags);
-	while (queue_insert_raw(q, cmd) == -ENOSPC) {
-		if (queue_poll_cons(q, false, wfe))
-			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
-	}
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+}
 
-	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
-		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+/*
+ * The difference between val and sync_idx is bounded by the maximum size of
+ * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
+ */
+static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+{
+	ktime_t timeout;
+	u32 val;
+
+	timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
+	val = smp_cond_load_acquire(&smmu->sync_count,
+				    (int)(VAL - sync_idx) >= 0 ||
+				    !ktime_before(ktime_get(), timeout));
+
+	return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
+}
+
+static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+{
+	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
+	struct arm_smmu_cmdq_ent ent = {
+		.opcode = CMDQ_OP_CMD_SYNC,
+		.sync	= {
+			.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+			.msiaddr = virt_to_phys(&smmu->sync_count),
+		},
+	};
+
+	arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
 	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+	return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+}
+
+static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
+	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+	struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
+	int ret;
+
+	arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+	ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+	return ret;
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+	int ret;
+	bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
+		   (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+
+	ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
+		  : __arm_smmu_cmdq_issue_sync(smmu);
+	if (ret)
+		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
 }
 
 /* Context descriptor manipulation functions */
@@ -996,6 +1086,11 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
 	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
 	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
 	      CTXDESC_CD_0_V;
+
+	/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+	if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+		val |= CTXDESC_CD_0_S;
+
 	cfg->cdptr[0] = cpu_to_le64(val);
 
 	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
@@ -1029,8 +1124,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 	};
 
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
@@ -1094,7 +1188,11 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
 						<< STRTAB_STE_1_SHCFG_SHIFT);
 		dst[2] = 0; /* Nuke the VMID */
-		if (ste_live)
+		/*
+		 * The SMMU can perform negative caching, so we must sync
+		 * the STE regardless of whether the old value was live.
+		 */
+		if (smmu)
 			arm_smmu_sync_ste_for_sid(smmu, sid);
 		return;
 	}
@@ -1112,7 +1210,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 #endif
 			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
 
-		if (smmu->features & ARM_SMMU_FEAT_STALLS)
+		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
+		   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
 		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
@@ -1275,12 +1374,6 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
-{
-	/* We don't actually use CMD_SYNC interrupts for anything */
-	return IRQ_HANDLED;
-}
-
 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
 
 static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
@@ -1313,10 +1406,8 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
 	if (active & GERROR_MSI_EVTQ_ABT_ERR)
 		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
 
-	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
+	if (active & GERROR_MSI_CMDQ_ABT_ERR)
 		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
-		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
-	}
 
 	if (active & GERROR_PRIQ_ABT_ERR)
 		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
@@ -1345,17 +1436,13 @@ static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
 static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
 {
 	arm_smmu_gerror_handler(irq, dev);
-	arm_smmu_cmdq_sync_handler(irq, dev);
 	return IRQ_WAKE_THREAD;
 }
 
 /* IO_PGTABLE API */
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
-	struct arm_smmu_cmdq_ent cmd;
-
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 }
 
 static void arm_smmu_tlb_sync(void *cookie)
@@ -2157,6 +2244,7 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
 {
 	int ret;
 
+	atomic_set(&smmu->sync_nr, 0);
 	ret = arm_smmu_init_queues(smmu);
 	if (ret)
 		return ret;
@@ -2275,15 +2363,6 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
 	}
 
-	irq = smmu->cmdq.q.irq;
-	if (irq) {
-		ret = devm_request_irq(smmu->dev, irq,
-				       arm_smmu_cmdq_sync_handler, 0,
-				       "arm-smmu-v3-cmdq-sync", smmu);
-		if (ret < 0)
-			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
-	}
-
 	irq = smmu->gerr_irq;
 	if (irq) {
 		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
@@ -2409,8 +2488,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	/* Invalidate any cached configuration */
 	cmd.opcode = CMDQ_OP_CFGI_ALL;
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 
 	/* Invalidate any stale TLB entries */
 	if (smmu->features & ARM_SMMU_FEAT_HYP) {
@@ -2420,8 +2498,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
 	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-	cmd.opcode = CMDQ_OP_CMD_SYNC;
-	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
 
 	/* Event queue */
 	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
@@ -2542,13 +2619,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	 * register, but warn on mismatch.
 	 */
 	if (!!(reg & IDR0_COHACC) != coherent)
-		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
+		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
 			 coherent ? "true" : "false");
 
 	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
-	case IDR0_STALL_MODEL_STALL:
-		/* Fallthrough */
 	case IDR0_STALL_MODEL_FORCE:
+		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
+		/* Fallthrough */
+	case IDR0_STALL_MODEL_STALL:
 		smmu->features |= ARM_SMMU_FEAT_STALLS;
 	}
 
@@ -2675,7 +2753,7 @@ static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
 	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
 		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
 		break;
-	case ACPI_IORT_SMMU_HISILICON_HI161X:
+	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
 		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
 		break;
 	}
@@ -2793,10 +2871,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	if (irq > 0)
 		smmu->priq.q.irq = irq;
 
-	irq = platform_get_irq_byname(pdev, "cmdq-sync");
-	if (irq > 0)
-		smmu->cmdq.q.irq = irq;
-
 	irq = platform_get_irq_byname(pdev, "gerror");
 	if (irq > 0)
 		smmu->gerr_irq = irq;
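The MSI-based CMD_SYNC path above avoids polling the queue pointers: each sync carries a monotonically increasing 32-bit index in its MSI data, the SMMU writes that index to sync_count on completion, and __arm_smmu_sync_poll_msi() treats the signed difference as "has my index been reached yet". A stand-alone sketch of that wrap-safe comparison (plain C with a small self-test; the kernel does the actual waiting with smp_cond_load_acquire()):

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * True once the observed completion counter 'seen' has reached 'want',
 * even across 32-bit wrap-around. Valid as long as the two values never
 * drift apart by 2^31 or more, which a 2^20-entry command queue guarantees.
 */
static bool sync_idx_complete(uint32_t seen, uint32_t want)
{
	return (int32_t)(seen - want) >= 0;
}

int main(void)
{
	assert(sync_idx_complete(100, 100));		/* exact match */
	assert(sync_idx_complete(105, 100));		/* already passed */
	assert(!sync_idx_complete(99, 100));		/* not yet */
	assert(sync_idx_complete(3, 0xfffffffe));	/* completed across the wrap */
	assert(!sync_idx_complete(0xfffffffe, 3));	/* far behind, not yet */
	return 0;
}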
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e4a82d70d446..78d4c6b8f1ba 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -59,6 +59,7 @@
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)
 
 #define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
 #define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)
 
 #define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
@@ -119,14 +120,6 @@ enum arm_smmu_implementation {
 	CAVIUM_SMMUV2,
 };
 
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
-#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
-#endif
-#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
-#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
-#endif
-
 struct arm_smmu_s2cr {
 	struct iommu_group		*group;
 	int				count;
@@ -1616,7 +1609,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		 * Allow unmatched Stream IDs to allocate bypass
 		 * TLB entries for reduced latency.
 		 */
-		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
+		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
 		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
 	}
 
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 1ea7cd537873..9a7ffd13c7f0 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1679,7 +1679,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}
 
-	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
+	       iommu->reg + DMAR_FSTS_REG);
 
 unlock_exit:
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f6697e55c2d4..ed1cf7c5a43b 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -292,7 +292,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	int pasid_max;
 	int ret;
 
-	if (WARN_ON(!iommu))
+	if (WARN_ON(!iommu || !iommu->pasid_table))
 		return -EINVAL;
 
 	if (dev_is_pci(dev)) {
@@ -458,6 +458,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			kfree_rcu(sdev, rcu);
 
 			if (list_empty(&svm->devs)) {
+				svm->iommu->pasid_table[svm->pasid].val = 0;
+				wmb();
 
 				idr_remove(&svm->iommu->pasid_idr, svm->pasid);
 				if (svm->mm)
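The intel-svm change above clears the hardware-visible PASID table entry and issues wmb() before the PASID number is recycled, so the IOMMU can no longer walk a stale entry once the software state goes away. A schematic of that teardown ordering (the entry layout, barrier macro and free_pasid() callback here are illustrative assumptions, not the driver's real types):

#include <stdint.h>

struct pasid_entry {
	volatile uint64_t val;	/* hardware-visible translation entry */
};

/* Illustrative stand-in for the kernel's wmb() write barrier. */
#define write_barrier()	__sync_synchronize()

static void teardown_pasid(struct pasid_entry *table, int pasid,
			   void (*free_pasid)(int))
{
	/* 1. Invalidate the entry so the IOMMU stops using it. */
	table[pasid].val = 0;

	/* 2. Order the clear before any reuse of the PASID number. */
	write_barrier();

	/* 3. Only now return the PASID to the allocator. */
	free_pasid(pasid);
}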
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index af8140054273..8dce3a9de9d8 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -19,30 +19,49 @@
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/sys_soc.h>
 
 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
 #include <asm/dma-iommu.h>
 #include <asm/pgalloc.h>
+#else
+#define arm_iommu_create_mapping(...)	NULL
+#define arm_iommu_attach_device(...)	-ENODEV
+#define arm_iommu_release_mapping(...)	do {} while (0)
+#define arm_iommu_detach_device(...)	do {} while (0)
 #endif
 
 #include "io-pgtable.h"
 
-#define IPMMU_CTX_MAX 1
+#define IPMMU_CTX_MAX 8
+
+struct ipmmu_features {
+	bool use_ns_alias_offset;
+	bool has_cache_leaf_nodes;
+	unsigned int number_of_contexts;
+	bool setup_imbuscr;
+	bool twobit_imttbcr_sl0;
+};
 
 struct ipmmu_vmsa_device {
 	struct device *dev;
 	void __iomem *base;
 	struct iommu_device iommu;
-
+	struct ipmmu_vmsa_device *root;
+	const struct ipmmu_features *features;
 	unsigned int num_utlbs;
+	unsigned int num_ctx;
 	spinlock_t lock;			/* Protects ctx and domains[] */
 	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
 	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
 
+	struct iommu_group *group;
 	struct dma_iommu_mapping *mapping;
 };
 
@@ -57,18 +76,12 @@ struct ipmmu_vmsa_domain {
 	spinlock_t lock;			/* Protects mappings */
 };
 
-struct ipmmu_vmsa_iommu_priv {
-	struct ipmmu_vmsa_device *mmu;
-	struct device *dev;
-	struct list_head list;
-};
-
 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
 {
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
 }
 
-static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
+static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 {
 	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
 }
@@ -133,6 +146,10 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
 #define IMTTBCR_TSZ0_MASK		(7 << 0)
 #define IMTTBCR_TSZ0_SHIFT		O
 
+#define IMTTBCR_SL0_TWOBIT_LVL_3	(0 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_2	(1 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)
+
 #define IMBUSCR				0x000c
 #define IMBUSCR_DVM			(1 << 2)
 #define IMBUSCR_BUSSEL_SYS		(0 << 0)
@@ -194,6 +211,36 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
 #define IMUASID_ASID0_SHIFT		0
 
 /* -----------------------------------------------------------------------------
+ * Root device handling
+ */
+
+static struct platform_driver ipmmu_driver;
+
+static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
+{
+	return mmu->root == mmu;
+}
+
+static int __ipmmu_check_device(struct device *dev, void *data)
+{
+	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+	struct ipmmu_vmsa_device **rootp = data;
+
+	if (ipmmu_is_root(mmu))
+		*rootp = mmu;
+
+	return 0;
+}
+
+static struct ipmmu_vmsa_device *ipmmu_find_root(void)
+{
+	struct ipmmu_vmsa_device *root = NULL;
+
+	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
+				      __ipmmu_check_device) == 0 ? root : NULL;
+}
+
+/* -----------------------------------------------------------------------------
  * Read/Write Access
  */
 
@@ -208,15 +255,29 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
 	iowrite32(data, mmu->base + offset);
 }
 
-static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
+			       unsigned int reg)
 {
-	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
+	return ipmmu_read(domain->mmu->root,
+			  domain->context_id * IM_CTX_SIZE + reg);
 }
 
-static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
-			    u32 data)
+static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
+				 unsigned int reg, u32 data)
 {
-	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
+	ipmmu_write(domain->mmu->root,
+		    domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
+static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
+				unsigned int reg, u32 data)
+{
+	if (domain->mmu != domain->mmu->root)
+		ipmmu_write(domain->mmu,
+			    domain->context_id * IM_CTX_SIZE + reg, data);
+
+	ipmmu_write(domain->mmu->root,
+		    domain->context_id * IM_CTX_SIZE + reg, data);
 }
 
 /* -----------------------------------------------------------------------------
@@ -228,7 +289,7 @@ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
 {
 	unsigned int count = 0;
 
-	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
 		cpu_relax();
 		if (++count == TLB_LOOP_TIMEOUT) {
 			dev_err_ratelimited(domain->mmu->dev,
@@ -243,9 +304,9 @@ static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
 {
 	u32 reg;
 
-	reg = ipmmu_ctx_read(domain, IMCTR);
+	reg = ipmmu_ctx_read_root(domain, IMCTR);
 	reg |= IMCTR_FLUSH;
-	ipmmu_ctx_write(domain, IMCTR, reg);
+	ipmmu_ctx_write_all(domain, IMCTR, reg);
 
 	ipmmu_tlb_sync(domain);
 }
@@ -313,11 +374,12 @@ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
 
 	spin_lock_irqsave(&mmu->lock, flags);
 
-	ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
-	if (ret != IPMMU_CTX_MAX) {
+	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
+	if (ret != mmu->num_ctx) {
 		mmu->domains[ret] = domain;
 		set_bit(ret, mmu->ctx);
-	}
+	} else
+		ret = -EBUSY;
 
 	spin_unlock_irqrestore(&mmu->lock, flags);
 
@@ -340,6 +402,7 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 {
 	u64 ttbr;
+	u32 tmp;
 	int ret;
 
 	/*
@@ -364,51 +427,59 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 * TODO: Add support for coherent walk through CCI with DVM and remove
 	 * cache handling. For now, delegate it to the io-pgtable code.
 	 */
-	domain->cfg.iommu_dev = domain->mmu->dev;
+	domain->cfg.iommu_dev = domain->mmu->root->dev;
 
 	/*
 	 * Find an unused context.
 	 */
-	ret = ipmmu_domain_allocate_context(domain->mmu, domain);
-	if (ret == IPMMU_CTX_MAX)
-		return -EBUSY;
+	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+	if (ret < 0)
+		return ret;
 
 	domain->context_id = ret;
 
 	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
 					   domain);
 	if (!domain->iop) {
-		ipmmu_domain_free_context(domain->mmu, domain->context_id);
+		ipmmu_domain_free_context(domain->mmu->root,
+					  domain->context_id);
 		return -EINVAL;
 	}
 
 	/* TTBR0 */
 	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
-	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
-	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
+	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
 	/*
 	 * TTBCR
 	 * We use long descriptors with inner-shareable WBWA tables and allocate
 	 * the whole 32-bit VA space to TTBR0.
 	 */
-	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
-			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
-			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+	if (domain->mmu->features->twobit_imttbcr_sl0)
+		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
+	else
+		tmp = IMTTBCR_SL0_LVL_1;
+
+	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
+			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | tmp);
 
 	/* MAIR0 */
-	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
+	ipmmu_ctx_write_root(domain, IMMAIR0,
+			     domain->cfg.arm_lpae_s1_cfg.mair[0]);
 
 	/* IMBUSCR */
-	ipmmu_ctx_write(domain, IMBUSCR,
-			ipmmu_ctx_read(domain, IMBUSCR) &
-			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+	if (domain->mmu->features->setup_imbuscr)
+		ipmmu_ctx_write_root(domain, IMBUSCR,
+				     ipmmu_ctx_read_root(domain, IMBUSCR) &
+				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
 
 	/*
 	 * IMSTR
 	 * Clear all interrupt flags.
 	 */
-	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
 
 	/*
 	 * IMCTR
@@ -417,7 +488,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 * software management as we have no use for it. Flush the TLB as
 	 * required when modifying the context registers.
 	 */
-	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+	ipmmu_ctx_write_all(domain, IMCTR,
+			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
 
 	return 0;
 }
@@ -430,9 +502,9 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 	 *
 	 * TODO: Is TLB flush really needed ?
 	 */
-	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
 	ipmmu_tlb_sync(domain);
-	ipmmu_domain_free_context(domain->mmu, domain->context_id);
+	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
 }
 
 /* -----------------------------------------------------------------------------
@@ -446,11 +518,11 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 	u32 status;
 	u32 iova;
 
-	status = ipmmu_ctx_read(domain, IMSTR);
+	status = ipmmu_ctx_read_root(domain, IMSTR);
 	if (!(status & err_mask))
 		return IRQ_NONE;
 
-	iova = ipmmu_ctx_read(domain, IMEAR);
+	iova = ipmmu_ctx_read_root(domain, IMEAR);
 
 	/*
 	 * Clear the error status flags. Unlike traditional interrupt flag
@@ -458,7 +530,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 	 * seems to require 0. The error address register must be read before,
 	 * otherwise its value will be 0.
 	 */
-	ipmmu_ctx_write(domain, IMSTR, 0);
+	ipmmu_ctx_write_root(domain, IMSTR, 0);
 
 	/* Log fatal errors. */
 	if (status & IMSTR_MHIT)
@@ -499,7 +571,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
 	/*
 	 * Check interrupts for all active contexts.
 	 */
-	for (i = 0; i < IPMMU_CTX_MAX; i++) {
+	for (i = 0; i < mmu->num_ctx; i++) {
 		if (!mmu->domains[i])
 			continue;
 		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
@@ -528,6 +600,27 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
 	return &domain->io_domain;
 }
 
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+{
+	struct iommu_domain *io_domain = NULL;
+
+	switch (type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		io_domain = __ipmmu_domain_alloc(type);
+		break;
+
+	case IOMMU_DOMAIN_DMA:
+		io_domain = __ipmmu_domain_alloc(type);
+		if (io_domain && iommu_get_dma_cookie(io_domain)) {
+			kfree(io_domain);
+			io_domain = NULL;
+		}
+		break;
+	}
+
+	return io_domain;
+}
+
 static void ipmmu_domain_free(struct iommu_domain *io_domain)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -536,6 +629,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
 	 * Free the domain resources. We assume that all devices have already
 	 * been detached.
 	 */
+	iommu_put_dma_cookie(io_domain);
 	ipmmu_domain_destroy_context(domain);
 	free_io_pgtable_ops(domain->iop);
 	kfree(domain);
@@ -544,15 +638,14 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
 static int ipmmu_attach_device(struct iommu_domain *io_domain,
 			       struct device *dev)
 {
-	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
 	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	struct ipmmu_vmsa_device *mmu = priv->mmu;
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
 
-	if (!priv || !priv->mmu) {
+	if (!mmu) {
 		dev_err(dev, "Cannot attach to IPMMU\n");
 		return -ENXIO;
 	}
@@ -563,6 +656,13 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		/* The domain hasn't been used yet, initialize it. */
 		domain->mmu = mmu;
 		ret = ipmmu_domain_init_context(domain);
+		if (ret < 0) {
+			dev_err(dev, "Unable to initialize IPMMU context\n");
+			domain->mmu = NULL;
+		} else {
+			dev_info(dev, "Using IPMMU context %u\n",
+				 domain->context_id);
+		}
 	} else if (domain->mmu != mmu) {
 		/*
 		 * Something is wrong, we can't attach two devices using
@@ -641,62 +741,53 @@ static int ipmmu_init_platform_device(struct device *dev,
 				      struct of_phandle_args *args)
 {
 	struct platform_device *ipmmu_pdev;
-	struct ipmmu_vmsa_iommu_priv *priv;
 
 	ipmmu_pdev = of_find_device_by_node(args->np);
 	if (!ipmmu_pdev)
 		return -ENODEV;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->mmu = platform_get_drvdata(ipmmu_pdev);
-	priv->dev = dev;
-	dev->iommu_fwspec->iommu_priv = priv;
+	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
 	return 0;
 }
 
+static bool ipmmu_slave_whitelist(struct device *dev)
+{
+	/* By default, do not allow use of IPMMU */
+	return false;
+}
+
+static const struct soc_device_attribute soc_r8a7795[] = {
+	{ .soc_id = "r8a7795", },
+	{ /* sentinel */ }
+};
+
 static int ipmmu_of_xlate(struct device *dev,
 			  struct of_phandle_args *spec)
 {
+	/* For R-Car Gen3 use a white list to opt-in slave devices */
+	if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
+		return -ENODEV;
+
 	iommu_fwspec_add_ids(dev, spec->args, 1);
 
 	/* Initialize once - xlate() will call multiple times */
-	if (to_priv(dev))
+	if (to_ipmmu(dev))
 		return 0;
 
 	return ipmmu_init_platform_device(dev, spec);
 }
 
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
-
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
-{
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
-	return __ipmmu_domain_alloc(type);
-}
-
-static int ipmmu_add_device(struct device *dev)
+static int ipmmu_init_arm_mapping(struct device *dev)
 {
-	struct ipmmu_vmsa_device *mmu = NULL;
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct iommu_group *group;
 	int ret;
 
-	/*
-	 * Only let through devices that have been verified in xlate()
-	 */
-	if (!to_priv(dev))
-		return -ENODEV;
-
 	/* Create a device group and add the device to it. */
 	group = iommu_group_alloc();
 	if (IS_ERR(group)) {
 		dev_err(dev, "Failed to allocate IOMMU group\n");
-		ret = PTR_ERR(group);
-		goto error;
+		return PTR_ERR(group);
 	}
 
 	ret = iommu_group_add_device(group, dev);
@@ -704,8 +795,7 @@ static int ipmmu_add_device(struct device *dev)
 
 	if (ret < 0) {
 		dev_err(dev, "Failed to add device to IPMMU group\n");
-		group = NULL;
-		goto error;
+		return ret;
 	}
 
 	/*
@@ -717,7 +807,6 @@ static int ipmmu_add_device(struct device *dev)
 	 * - Make the mapping size configurable ? We currently use a 2GB mapping
 	 * at a 1GB offset to ensure that NULL VAs will fault.
 	 */
-	mmu = to_priv(dev)->mmu;
 	if (!mmu->mapping) {
 		struct dma_iommu_mapping *mapping;
 
@@ -742,144 +831,58 @@ static int ipmmu_add_device(struct device *dev)
 	return 0;
 
 error:
-	if (mmu)
+	iommu_group_remove_device(dev);
+	if (mmu->mapping)
 		arm_iommu_release_mapping(mmu->mapping);
 
-	if (!IS_ERR_OR_NULL(group))
-		iommu_group_remove_device(dev);
-
 	return ret;
 }
 
-static void ipmmu_remove_device(struct device *dev)
-{
-	arm_iommu_detach_device(dev);
-	iommu_group_remove_device(dev);
-}
-
-static const struct iommu_ops ipmmu_ops = {
-	.domain_alloc = ipmmu_domain_alloc,
-	.domain_free = ipmmu_domain_free,
-	.attach_dev = ipmmu_attach_device,
-	.detach_dev = ipmmu_detach_device,
-	.map = ipmmu_map,
-	.unmap = ipmmu_unmap,
-	.map_sg = default_iommu_map_sg,
-	.iova_to_phys = ipmmu_iova_to_phys,
-	.add_device = ipmmu_add_device,
-	.remove_device = ipmmu_remove_device,
-	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
-	.of_xlate = ipmmu_of_xlate,
-};
-
-#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */
-
-#ifdef CONFIG_IOMMU_DMA
-
-static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
-static LIST_HEAD(ipmmu_slave_devices);
-
-static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
-{
-	struct iommu_domain *io_domain = NULL;
-
-	switch (type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		io_domain = __ipmmu_domain_alloc(type);
-		break;
-
-	case IOMMU_DOMAIN_DMA:
-		io_domain = __ipmmu_domain_alloc(type);
-		if (io_domain)
-			iommu_get_dma_cookie(io_domain);
-		break;
-	}
-
-	return io_domain;
-}
-
-static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
-{
-	switch (io_domain->type) {
-	case IOMMU_DOMAIN_DMA:
-		iommu_put_dma_cookie(io_domain);
-		/* fall-through */
-	default:
-		ipmmu_domain_free(io_domain);
-		break;
-	}
-}
-
-static int ipmmu_add_device_dma(struct device *dev)
+static int ipmmu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
 
 	/*
 	 * Only let through devices that have been verified in xlate()
 	 */
-	if (!to_priv(dev))
+	if (!to_ipmmu(dev))
 		return -ENODEV;
 
+	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+		return ipmmu_init_arm_mapping(dev);
+
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 
-	spin_lock(&ipmmu_slave_devices_lock);
-	list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
-	spin_unlock(&ipmmu_slave_devices_lock);
+	iommu_group_put(group);
 	return 0;
 }
 
-static void ipmmu_remove_device_dma(struct device *dev)
+static void ipmmu_remove_device(struct device *dev)
 {
-	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
-
-	spin_lock(&ipmmu_slave_devices_lock);
-	list_del(&priv->list);
-	spin_unlock(&ipmmu_slave_devices_lock);
-
+	arm_iommu_detach_device(dev);
 	iommu_group_remove_device(dev);
 }
 
-static struct device *ipmmu_find_sibling_device(struct device *dev)
-{
-	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
-	struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
-	bool found = false;
-
-	spin_lock(&ipmmu_slave_devices_lock);
-
-	list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
-		if (priv == sibling_priv)
-			continue;
-		if (sibling_priv->mmu == priv->mmu) {
-			found = true;
-			break;
-		}
-	}
-
-	spin_unlock(&ipmmu_slave_devices_lock);
-
-	return found ? sibling_priv->dev : NULL;
-}
-
-static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
+static struct iommu_group *ipmmu_find_group(struct device *dev)
 {
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct iommu_group *group;
-	struct device *sibling;
 
-	sibling = ipmmu_find_sibling_device(dev);
-	if (sibling)
-		group = iommu_group_get(sibling);
-	if (!sibling || IS_ERR(group))
-		group = generic_device_group(dev);
+	if (mmu->group)
+		return iommu_group_ref_get(mmu->group);
+
+	group = iommu_group_alloc();
+	if (!IS_ERR(group))
+		mmu->group = group;
 
 	return group;
 }
 
 static const struct iommu_ops ipmmu_ops = {
-	.domain_alloc = ipmmu_domain_alloc_dma,
-	.domain_free = ipmmu_domain_free_dma,
+	.domain_alloc = ipmmu_domain_alloc,
+	.domain_free = ipmmu_domain_free,
 	.attach_dev = ipmmu_attach_device,
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
@@ -888,15 +891,13 @@ static const struct iommu_ops ipmmu_ops = { | |||
888 | .iotlb_sync = ipmmu_iotlb_sync, | 891 | .iotlb_sync = ipmmu_iotlb_sync, |
889 | .map_sg = default_iommu_map_sg, | 892 | .map_sg = default_iommu_map_sg, |
890 | .iova_to_phys = ipmmu_iova_to_phys, | 893 | .iova_to_phys = ipmmu_iova_to_phys, |
891 | .add_device = ipmmu_add_device_dma, | 894 | .add_device = ipmmu_add_device, |
892 | .remove_device = ipmmu_remove_device_dma, | 895 | .remove_device = ipmmu_remove_device, |
893 | .device_group = ipmmu_find_group_dma, | 896 | .device_group = ipmmu_find_group, |
894 | .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, | 897 | .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, |
895 | .of_xlate = ipmmu_of_xlate, | 898 | .of_xlate = ipmmu_of_xlate, |
896 | }; | 899 | }; |
897 | 900 | ||
898 | #endif /* CONFIG_IOMMU_DMA */ | ||
899 | |||
900 | /* ----------------------------------------------------------------------------- | 901 | /* ----------------------------------------------------------------------------- |
901 | * Probe/remove and init | 902 | * Probe/remove and init |
902 | */ | 903 | */ |
@@ -906,10 +907,40 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) | |||
906 | unsigned int i; | 907 | unsigned int i; |
907 | 908 | ||
908 | /* Disable all contexts. */ | 909 | /* Disable all contexts. */ |
909 | for (i = 0; i < 4; ++i) | 910 | for (i = 0; i < mmu->num_ctx; ++i) |
910 | ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); | 911 | ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); |
911 | } | 912 | } |
912 | 913 | ||
914 | static const struct ipmmu_features ipmmu_features_default = { | ||
915 | .use_ns_alias_offset = true, | ||
916 | .has_cache_leaf_nodes = false, | ||
917 | .number_of_contexts = 1, /* software only tested with one context */ | ||
918 | .setup_imbuscr = true, | ||
919 | .twobit_imttbcr_sl0 = false, | ||
920 | }; | ||
921 | |||
922 | static const struct ipmmu_features ipmmu_features_r8a7795 = { | ||
923 | .use_ns_alias_offset = false, | ||
924 | .has_cache_leaf_nodes = true, | ||
925 | .number_of_contexts = 8, | ||
926 | .setup_imbuscr = false, | ||
927 | .twobit_imttbcr_sl0 = true, | ||
928 | }; | ||
929 | |||
930 | static const struct of_device_id ipmmu_of_ids[] = { | ||
931 | { | ||
932 | .compatible = "renesas,ipmmu-vmsa", | ||
933 | .data = &ipmmu_features_default, | ||
934 | }, { | ||
935 | .compatible = "renesas,ipmmu-r8a7795", | ||
936 | .data = &ipmmu_features_r8a7795, | ||
937 | }, { | ||
938 | /* Terminator */ | ||
939 | }, | ||
940 | }; | ||
941 | |||
942 | MODULE_DEVICE_TABLE(of, ipmmu_of_ids); | ||
943 | |||
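The of_device_id table above is what lets one driver serve both the R-Car Gen2 and Gen3 integrations: each compatible entry carries a pointer to a per-SoC ipmmu_features structure, which probe() retrieves with of_device_get_match_data() further down in this hunk. A stripped-down sketch of the same pattern follows; all example_* names are hypothetical and only the standard OF match-data API is assumed.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_features {
	bool		use_alias_offset;
	unsigned int	number_of_contexts;
};

static const struct example_features example_features_default = {
	.use_alias_offset	= true,
	.number_of_contexts	= 1,
};

static const struct of_device_id example_of_ids[] = {
	{ .compatible = "vendor,example-iommu", .data = &example_features_default },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_features *features;

	/* Returns the .data pointer of the matching entry, or NULL if the
	 * device was not probed through an OF match. */
	features = of_device_get_match_data(&pdev->dev);
	if (!features)
		return -ENODEV;

	dev_info(&pdev->dev, "using %u context(s)\n",
		 features->number_of_contexts);
	return 0;
}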
913 | static int ipmmu_probe(struct platform_device *pdev) | 944 | static int ipmmu_probe(struct platform_device *pdev) |
914 | { | 945 | { |
915 | struct ipmmu_vmsa_device *mmu; | 946 | struct ipmmu_vmsa_device *mmu; |
@@ -927,6 +958,8 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
927 | mmu->num_utlbs = 32; | 958 | mmu->num_utlbs = 32; |
928 | spin_lock_init(&mmu->lock); | 959 | spin_lock_init(&mmu->lock); |
929 | bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); | 960 | bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); |
961 | mmu->features = of_device_get_match_data(&pdev->dev); | ||
962 | dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); | ||
930 | 963 | ||
931 | /* Map I/O memory and request IRQ. */ | 964 | /* Map I/O memory and request IRQ. */ |
932 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 965 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -946,34 +979,71 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
946 | * Offset the registers base unconditionally to point to the non-secure | 979 | * Offset the registers base unconditionally to point to the non-secure |
947 | * alias space for now. | 980 | * alias space for now. |
948 | */ | 981 | */ |
949 | mmu->base += IM_NS_ALIAS_OFFSET; | 982 | if (mmu->features->use_ns_alias_offset) |
983 | mmu->base += IM_NS_ALIAS_OFFSET; | ||
984 | |||
985 | mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX, | ||
986 | mmu->features->number_of_contexts); | ||
950 | 987 | ||
951 | irq = platform_get_irq(pdev, 0); | 988 | irq = platform_get_irq(pdev, 0); |
952 | if (irq < 0) { | ||
953 | dev_err(&pdev->dev, "no IRQ found\n"); | ||
954 | return irq; | ||
955 | } | ||
956 | 989 | ||
957 | ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, | 990 | /* |
958 | dev_name(&pdev->dev), mmu); | 991 | * Determine if this IPMMU instance is a root device by checking for |
959 | if (ret < 0) { | 992 | * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. |
960 | dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); | 993 | */ |
961 | return ret; | 994 | if (!mmu->features->has_cache_leaf_nodes || |
962 | } | 995 | !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL)) |
996 | mmu->root = mmu; | ||
997 | else | ||
998 | mmu->root = ipmmu_find_root(); | ||
963 | 999 | ||
964 | ipmmu_device_reset(mmu); | 1000 | /* |
1001 | * Wait until the root device has been registered for sure. | ||
1002 | */ | ||
1003 | if (!mmu->root) | ||
1004 | return -EPROBE_DEFER; | ||
1005 | |||
1006 | /* Root devices have mandatory IRQs */ | ||
1007 | if (ipmmu_is_root(mmu)) { | ||
1008 | if (irq < 0) { | ||
1009 | dev_err(&pdev->dev, "no IRQ found\n"); | ||
1010 | return irq; | ||
1011 | } | ||
965 | 1012 | ||
966 | ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, | 1013 | ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, |
967 | dev_name(&pdev->dev)); | 1014 | dev_name(&pdev->dev), mmu); |
968 | if (ret) | 1015 | if (ret < 0) { |
969 | return ret; | 1016 | dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); |
1017 | return ret; | ||
1018 | } | ||
970 | 1019 | ||
971 | iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); | 1020 | ipmmu_device_reset(mmu); |
972 | iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode); | 1021 | } |
973 | 1022 | ||
974 | ret = iommu_device_register(&mmu->iommu); | 1023 | /* |
975 | if (ret) | 1024 | * Register the IPMMU to the IOMMU subsystem in the following cases: |
976 | return ret; | 1025 | * - R-Car Gen2 IPMMU (all devices registered) |
1026 | * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) | ||
1027 | */ | ||
1028 | if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { | ||
1029 | ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, | ||
1030 | dev_name(&pdev->dev)); | ||
1031 | if (ret) | ||
1032 | return ret; | ||
1033 | |||
1034 | iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); | ||
1035 | iommu_device_set_fwnode(&mmu->iommu, | ||
1036 | &pdev->dev.of_node->fwnode); | ||
1037 | |||
1038 | ret = iommu_device_register(&mmu->iommu); | ||
1039 | if (ret) | ||
1040 | return ret; | ||
1041 | |||
1042 | #if defined(CONFIG_IOMMU_DMA) | ||
1043 | if (!iommu_present(&platform_bus_type)) | ||
1044 | bus_set_iommu(&platform_bus_type, &ipmmu_ops); | ||
1045 | #endif | ||
1046 | } | ||
977 | 1047 | ||
978 | /* | 1048 | /* |
979 | * We can't create the ARM mapping here as it requires the bus to have | 1049 | * We can't create the ARM mapping here as it requires the bus to have |
@@ -993,20 +1063,13 @@ static int ipmmu_remove(struct platform_device *pdev) | |||
993 | iommu_device_sysfs_remove(&mmu->iommu); | 1063 | iommu_device_sysfs_remove(&mmu->iommu); |
994 | iommu_device_unregister(&mmu->iommu); | 1064 | iommu_device_unregister(&mmu->iommu); |
995 | 1065 | ||
996 | #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) | ||
997 | arm_iommu_release_mapping(mmu->mapping); | 1066 | arm_iommu_release_mapping(mmu->mapping); |
998 | #endif | ||
999 | 1067 | ||
1000 | ipmmu_device_reset(mmu); | 1068 | ipmmu_device_reset(mmu); |
1001 | 1069 | ||
1002 | return 0; | 1070 | return 0; |
1003 | } | 1071 | } |
1004 | 1072 | ||
1005 | static const struct of_device_id ipmmu_of_ids[] = { | ||
1006 | { .compatible = "renesas,ipmmu-vmsa", }, | ||
1007 | { } | ||
1008 | }; | ||
1009 | |||
1010 | static struct platform_driver ipmmu_driver = { | 1073 | static struct platform_driver ipmmu_driver = { |
1011 | .driver = { | 1074 | .driver = { |
1012 | .name = "ipmmu-vmsa", | 1075 | .name = "ipmmu-vmsa", |
@@ -1018,15 +1081,22 @@ static struct platform_driver ipmmu_driver = { | |||
1018 | 1081 | ||
1019 | static int __init ipmmu_init(void) | 1082 | static int __init ipmmu_init(void) |
1020 | { | 1083 | { |
1084 | static bool setup_done; | ||
1021 | int ret; | 1085 | int ret; |
1022 | 1086 | ||
1087 | if (setup_done) | ||
1088 | return 0; | ||
1089 | |||
1023 | ret = platform_driver_register(&ipmmu_driver); | 1090 | ret = platform_driver_register(&ipmmu_driver); |
1024 | if (ret < 0) | 1091 | if (ret < 0) |
1025 | return ret; | 1092 | return ret; |
1026 | 1093 | ||
1094 | #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) | ||
1027 | if (!iommu_present(&platform_bus_type)) | 1095 | if (!iommu_present(&platform_bus_type)) |
1028 | bus_set_iommu(&platform_bus_type, &ipmmu_ops); | 1096 | bus_set_iommu(&platform_bus_type, &ipmmu_ops); |
1097 | #endif | ||
1029 | 1098 | ||
1099 | setup_done = true; | ||
1030 | return 0; | 1100 | return 0; |
1031 | } | 1101 | } |
1032 | 1102 | ||
@@ -1038,6 +1108,19 @@ static void __exit ipmmu_exit(void) | |||
1038 | subsys_initcall(ipmmu_init); | 1108 | subsys_initcall(ipmmu_init); |
1039 | module_exit(ipmmu_exit); | 1109 | module_exit(ipmmu_exit); |
1040 | 1110 | ||
1111 | #ifdef CONFIG_IOMMU_DMA | ||
1112 | static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np) | ||
1113 | { | ||
1114 | ipmmu_init(); | ||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa", | ||
1119 | ipmmu_vmsa_iommu_of_setup); | ||
1120 | IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795", | ||
1121 | ipmmu_vmsa_iommu_of_setup); | ||
1122 | #endif | ||
1123 | |||
1041 | MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); | 1124 | MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); |
1042 | MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); | 1125 | MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); |
1043 | MODULE_LICENSE("GPL v2"); | 1126 | MODULE_LICENSE("GPL v2"); |
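In the probe path above, a cache (leaf) IPMMU defers with -EPROBE_DEFER until its root IPMMU-MM instance has been registered; the ipmmu_find_root() helper itself is not part of the hunks shown here. Purely as an illustration of how such a lookup can be written, the sketch below walks the devices already bound to the same platform driver and returns the one that declared itself root; every example_* name is hypothetical.

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical per-instance state mirroring the driver's mmu->root pointer. */
struct example_ipmmu {
	struct device		*dev;
	struct example_ipmmu	*root;
};

static int __example_check_root(struct device *dev, void *data)
{
	struct example_ipmmu *mmu = dev_get_drvdata(dev);
	struct example_ipmmu **rootp = data;

	if (mmu && mmu->root == mmu)
		*rootp = mmu;		/* this instance is a root IPMMU-MM */

	return 0;
}

/* example_driver stands in for this module's own struct platform_driver. */
static struct example_ipmmu *example_find_root(struct platform_driver *example_driver)
{
	struct example_ipmmu *root = NULL;

	driver_for_each_device(&example_driver->driver, NULL, &root,
			       __example_check_root);

	return root;	/* NULL means the root has not probed yet: -EPROBE_DEFER */
}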
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index bc1efbfb9ddf..542930cd183d 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c | |||
@@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = { | |||
708 | .probe = mtk_iommu_probe, | 708 | .probe = mtk_iommu_probe, |
709 | .remove = mtk_iommu_remove, | 709 | .remove = mtk_iommu_remove, |
710 | .driver = { | 710 | .driver = { |
711 | .name = "mtk-iommu", | 711 | .name = "mtk-iommu-v1", |
712 | .of_match_table = mtk_iommu_of_ids, | 712 | .of_match_table = mtk_iommu_of_ids, |
713 | .pm = &mtk_iommu_pm_ops, | 713 | .pm = &mtk_iommu_pm_ops, |
714 | } | 714 | } |
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 4a2c4378b3db..e07f02d00c68 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c | |||
@@ -66,6 +66,7 @@ struct qcom_iommu_ctx { | |||
66 | void __iomem *base; | 66 | void __iomem *base; |
67 | bool secure_init; | 67 | bool secure_init; |
68 | u8 asid; /* asid and ctx bank # are 1:1 */ | 68 | u8 asid; /* asid and ctx bank # are 1:1 */ |
69 | struct iommu_domain *domain; | ||
69 | }; | 70 | }; |
70 | 71 | ||
71 | struct qcom_iommu_domain { | 72 | struct qcom_iommu_domain { |
@@ -194,12 +195,15 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) | |||
194 | fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); | 195 | fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); |
195 | iova = iommu_readq(ctx, ARM_SMMU_CB_FAR); | 196 | iova = iommu_readq(ctx, ARM_SMMU_CB_FAR); |
196 | 197 | ||
197 | dev_err_ratelimited(ctx->dev, | 198 | if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) { |
198 | "Unhandled context fault: fsr=0x%x, " | 199 | dev_err_ratelimited(ctx->dev, |
199 | "iova=0x%016llx, fsynr=0x%x, cb=%d\n", | 200 | "Unhandled context fault: fsr=0x%x, " |
200 | fsr, iova, fsynr, ctx->asid); | 201 | "iova=0x%016llx, fsynr=0x%x, cb=%d\n", |
202 | fsr, iova, fsynr, ctx->asid); | ||
203 | } | ||
201 | 204 | ||
202 | iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr); | 205 | iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr); |
206 | iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE); | ||
203 | 207 | ||
204 | return IRQ_HANDLED; | 208 | return IRQ_HANDLED; |
205 | } | 209 | } |
@@ -274,12 +278,14 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, | |||
274 | 278 | ||
275 | /* SCTLR */ | 279 | /* SCTLR */ |
276 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | | 280 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | |
277 | SCTLR_M | SCTLR_S1_ASIDPNE; | 281 | SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG; |
278 | 282 | ||
279 | if (IS_ENABLED(CONFIG_BIG_ENDIAN)) | 283 | if (IS_ENABLED(CONFIG_BIG_ENDIAN)) |
280 | reg |= SCTLR_E; | 284 | reg |= SCTLR_E; |
281 | 285 | ||
282 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); | 286 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); |
287 | |||
288 | ctx->domain = domain; | ||
283 | } | 289 | } |
284 | 290 | ||
285 | mutex_unlock(&qcom_domain->init_mutex); | 291 | mutex_unlock(&qcom_domain->init_mutex); |
@@ -395,6 +401,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de | |||
395 | 401 | ||
396 | /* Disable the context bank: */ | 402 | /* Disable the context bank: */ |
397 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); | 403 | iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); |
404 | |||
405 | ctx->domain = NULL; | ||
398 | } | 406 | } |
399 | pm_runtime_put_sync(qcom_iommu->dev); | 407 | pm_runtime_put_sync(qcom_iommu->dev); |
400 | 408 | ||
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 485a5b48f038..f3274d9f46a2 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -212,6 +212,7 @@ | |||
212 | #define DMA_FSTS_IQE (1 << 4) | 212 | #define DMA_FSTS_IQE (1 << 4) |
213 | #define DMA_FSTS_ICE (1 << 5) | 213 | #define DMA_FSTS_ICE (1 << 5) |
214 | #define DMA_FSTS_ITE (1 << 6) | 214 | #define DMA_FSTS_ITE (1 << 6) |
215 | #define DMA_FSTS_PRO (1 << 7) | ||
215 | #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) | 216 | #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) |
216 | 217 | ||
217 | /* FRCD_REG, 32 bits access */ | 218 | /* FRCD_REG, 32 bits access */ |
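DMA_FSTS_PRO is the Page Request Overflow bit (bit 7) of the VT-d Fault Status Register; like the other FSTS bits it is write-1-to-clear. The sketch below shows how a page-request handler would typically acknowledge it; the surrounding function is hypothetical and only the register offset and bit definition come from this header (an already ioremapped struct intel_iommu is assumed).

#include <linux/intel-iommu.h>
#include <linux/io.h>

static void example_clear_pr_overflow(struct intel_iommu *iommu)
{
	u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);

	/* Writing the bit back clears it (RW1C). */
	if (fsts & DMA_FSTS_PRO)
		writel(DMA_FSTS_PRO, iommu->reg + DMAR_FSTS_REG);
}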