author    Linus Torvalds <torvalds@linux-foundation.org>  2014-09-12 12:26:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-09-12 12:26:49 -0400
commit    753a6cb7e4fc112e2f120ca02d167535382e9cec
tree      12854bde19f4bb9fc1eb96a71affc7338c3599dc
parent    96ea975bfeec855d119bb93df885d7f18452856a
parent    5a9137a66b521d667236e95c307b92af532fe600
Merge tag 'iommu-fixes-v3.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu fixes from Joerg Roedel:
- two fixes for issues found by Coverity
- various fixes for the ARM SMMU driver
- a warning fix for the FSL PAMU driver
* tag 'iommu-fixes-v3.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/fsl: Fix warning resulting from adding PCI device twice
iommu/arm-smmu: fix corner cases in address size calculations
iommu/arm-smmu: fix decimal printf format specifiers prefixed with 0x
iommu/arm-smmu: Do not access non-existing S2CR registers
iommu/arm-smmu: fix s2cr and smr teardown on device detach from domain
iommu/arm-smmu: remove pgtable_page_{c,d}tor()
iommu/arm-smmu: fix programming of SMMU_CBn_TCR for stage 1
iommu/arm-smmu: avoid calling request_irq in atomic context
iommu/vt-d: Check return value of acpi_bus_get_device()
iommu/core: Make iommu_group_get_for_dev() more robust
-rw-r--r--  drivers/iommu/arm-smmu.c         127
-rw-r--r--  drivers/iommu/dmar.c               3
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c   10
-rw-r--r--  drivers/iommu/iommu.c              8
4 files changed, 87 insertions, 61 deletions
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ca18d6d42a9b..a83cc2a2a2ca 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -146,6 +146,8 @@
 #define ID0_CTTW                (1 << 14)
 #define ID0_NUMIRPT_SHIFT       16
 #define ID0_NUMIRPT_MASK        0xff
+#define ID0_NUMSIDB_SHIFT       9
+#define ID0_NUMSIDB_MASK        0xf
 #define ID0_NUMSMRG_SHIFT       0
 #define ID0_NUMSMRG_MASK        0xff
 
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
         master->of_node = masterspec->np;
         master->cfg.num_streamids = masterspec->args_count;
 
-        for (i = 0; i < master->cfg.num_streamids; ++i)
-                master->cfg.streamids[i] = masterspec->args[i];
+        for (i = 0; i < master->cfg.num_streamids; ++i) {
+                u16 streamid = masterspec->args[i];
 
+                if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+                     (streamid >= smmu->num_mapping_groups)) {
+                        dev_err(dev,
+                                "stream ID for master device %s greater than maximum allowed (%d)\n",
+                                masterspec->np->name, smmu->num_mapping_groups);
+                        return -ERANGE;
+                }
+                master->cfg.streamids[i] = streamid;
+        }
         return insert_smmu_master(smmu, master);
 }
 
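This hunk rejects out-of-range stream IDs at registration time: on an SMMU without stream matching, a stream ID indexes the S2CR registers directly, so an oversized ID from the device tree would address a register that does not exist. A minimal sketch of the check, with placeholder names standing in for the driver's fields:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: validate a device-tree stream ID before use. The
 * parameters mirror smmu->features and smmu->num_mapping_groups;
 * this is not the driver's real API.
 */
static int check_streamid(u16 streamid, bool has_stream_match, u32 num_groups)
{
        /* Without stream matching, the ID indexes S2CR registers directly. */
        if (!has_stream_match && streamid >= num_groups)
                return -ERANGE;
        return 0;
}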
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 
         if (fsr & FSR_IGN)
                 dev_err_ratelimited(smmu->dev,
-                                    "Unexpected context fault (fsr 0x%u)\n",
+                                    "Unexpected context fault (fsr 0x%x)\n",
                                     fsr);
 
         fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                 reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
                 break;
         case 39:
+        case 40:
                 reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
                 break;
         case 42:
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                 reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
                 break;
         case 39:
+        case 40:
                 reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
                 break;
         case 42:
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
         reg |= TTBCR_EAE |
               (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
               (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-              (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
-              (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+              (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+        if (!stage1)
+                reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
         writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
         /* MAIR0 (stage-1 only) */
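SL0 (the starting translation level) exists only in the stage-2 TCR format; OR-ing it in unconditionally corrupted SMMU_CBn_TCR for stage-1 contexts, which the hunk above fixes by restricting the field to the !stage1 path. A sketch of the idea, using placeholder bit positions rather than the real register layout:

#include <linux/types.h>

#define TCR_COMMON      (1U << 31)      /* placeholder for shared fields */
#define TCR_S2_SL0_LVL1 (1U << 6)       /* placeholder: stage-2-only field */

/* Sketch: assemble a control-register value, applying stage-2-only
 * fields conditionally instead of unconditionally.
 */
static u32 build_tcr(bool stage1)
{
        u32 reg = TCR_COMMON;

        if (!stage1)
                reg |= TCR_S2_SL0_LVL1;
        return reg;
}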
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                         struct arm_smmu_device *smmu)
 {
-        int irq, ret, start;
+        int irq, start, ret = 0;
+        unsigned long flags;
         struct arm_smmu_domain *smmu_domain = domain->priv;
         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
+        spin_lock_irqsave(&smmu_domain->lock, flags);
+        if (smmu_domain->smmu)
+                goto out_unlock;
+
         if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
                 /*
                  * We will likely want to change this if/when KVM gets
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                       smmu->num_context_banks);
         if (IS_ERR_VALUE(ret))
-                return ret;
+                goto out_unlock;
 
         cfg->cbndx = ret;
         if (smmu->version == 1) {
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                 cfg->irptndx = cfg->cbndx;
         }
 
+        ACCESS_ONCE(smmu_domain->smmu) = smmu;
+        arm_smmu_init_context_bank(smmu_domain);
+        spin_unlock_irqrestore(&smmu_domain->lock, flags);
+
         irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
         ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
                           "arm-smmu-context-fault", domain);
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                         cfg->irptndx, irq);
                 cfg->irptndx = INVALID_IRPTNDX;
-                goto out_free_context;
         }
 
-        smmu_domain->smmu = smmu;
-        arm_smmu_init_context_bank(smmu_domain);
         return 0;
 
-out_free_context:
-        __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+out_unlock:
+        spin_unlock_irqrestore(&smmu_domain->lock, flags);
         return ret;
 }
 
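Taken together, the last three hunks fix the request_irq()-in-atomic-context problem: the domain lock is now released, after publishing smmu_domain->smmu and initialising the context bank, before request_irq() (which may sleep) runs, and an IRQ failure just marks cfg->irptndx invalid instead of unwinding the context. A condensed sketch of the lock-then-publish-then-sleep shape, with placeholder types and helpers:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch only: publish state under a spinlock, then drop the lock
 * before calling anything that can sleep. All names are placeholders.
 */
struct my_domain {
        spinlock_t lock;
        bool initialised;
};

static void setup_state(struct my_domain *dom) { }                /* non-sleeping */
static int do_sleeping_call(struct my_domain *dom) { return 0; }  /* may sleep */

static int init_once(struct my_domain *dom)
{
        unsigned long flags;

        spin_lock_irqsave(&dom->lock, flags);
        if (dom->initialised) {                 /* somebody beat us to it */
                spin_unlock_irqrestore(&dom->lock, flags);
                return 0;
        }
        setup_state(dom);
        dom->initialised = true;
        spin_unlock_irqrestore(&dom->lock, flags);

        /* Lock dropped: sleeping calls are now safe. */
        return do_sleeping_call(dom);
}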
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd)
 {
         pgtable_t table = pmd_pgtable(*pmd);
 
-        pgtable_page_dtor(table);
         __free_page(table);
 }
 
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
         struct arm_smmu_smr *smrs = cfg->smrs;
 
+        if (!smrs)
+                return;
+
         /* Invalidate the SMRs before freeing back to the allocator */
         for (i = 0; i < cfg->num_streamids; ++i) {
                 u8 idx = smrs[i].idx;
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
         kfree(smrs);
 }
 
-static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
-                                           struct arm_smmu_master_cfg *cfg)
-{
-        int i;
-        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-        for (i = 0; i < cfg->num_streamids; ++i) {
-                u16 sid = cfg->streamids[i];
-
-                writel_relaxed(S2CR_TYPE_BYPASS,
-                               gr0_base + ARM_SMMU_GR0_S2CR(sid));
-        }
-}
-
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                                       struct arm_smmu_master_cfg *cfg)
 {
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
                                           struct arm_smmu_master_cfg *cfg)
 {
+        int i;
         struct arm_smmu_device *smmu = smmu_domain->smmu;
+        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
         /*
          * We *must* clear the S2CR first, because freeing the SMR means
          * that it can be re-allocated immediately.
          */
-        arm_smmu_bypass_stream_mapping(smmu, cfg);
+        for (i = 0; i < cfg->num_streamids; ++i) {
+                u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+                writel_relaxed(S2CR_TYPE_BYPASS,
+                               gr0_base + ARM_SMMU_GR0_S2CR(idx));
+        }
+
         arm_smmu_master_free_smrs(smmu, cfg);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-        int ret = -EINVAL;
+        int ret;
         struct arm_smmu_domain *smmu_domain = domain->priv;
-        struct arm_smmu_device *smmu;
+        struct arm_smmu_device *smmu, *dom_smmu;
         struct arm_smmu_master_cfg *cfg;
-        unsigned long flags;
 
         smmu = dev_get_master_dev(dev)->archdata.iommu;
         if (!smmu) {
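This is the teardown fix: with stream matching, the S2CR registers are indexed by the allocated SMR index rather than by the raw stream ID, so the removed arm_smmu_bypass_stream_mapping() helper, which always used cfg->streamids[i], could rewrite the wrong S2CRs on detach. The index choice, sketched with a stand-in type:

#include <linux/types.h>

struct smr_entry { u8 idx; };   /* stand-in for struct arm_smmu_smr */

/* Sketch: choose the S2CR register index for the i-th stream. With
 * stream matching, the allocated SMR index is the S2CR index; without
 * it, the stream ID itself is.
 */
static u32 s2cr_index(const struct smr_entry *smrs, const u16 *streamids, int i)
{
        return smrs ? smrs[i].idx : streamids[i];
}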
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
          * Sanity check the domain. We don't support domains across
          * different SMMUs.
          */
-        spin_lock_irqsave(&smmu_domain->lock, flags);
-        if (!smmu_domain->smmu) {
+        dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+        if (!dom_smmu) {
                 /* Now that we have a master, we can finalise the domain */
                 ret = arm_smmu_init_domain_context(domain, smmu);
                 if (IS_ERR_VALUE(ret))
-                        goto err_unlock;
-        } else if (smmu_domain->smmu != smmu) {
+                        return ret;
+
+                dom_smmu = smmu_domain->smmu;
+        }
+
+        if (dom_smmu != smmu) {
                 dev_err(dev,
                         "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-                        dev_name(smmu_domain->smmu->dev),
-                        dev_name(smmu->dev));
-                goto err_unlock;
+                        dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+                return -EINVAL;
         }
-        spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
         /* Looks ok, so add the device to the domain */
         cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                 return -ENODEV;
 
         return arm_smmu_domain_add_master(smmu_domain, cfg);
-
-err_unlock:
-        spin_unlock_irqrestore(&smmu_domain->lock, flags);
-        return ret;
 }
 
 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
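With the lock moved into arm_smmu_init_domain_context(), attach can use a lockless fast path: read smmu_domain->smmu once via ACCESS_ONCE(), fall into the locked init path only when the domain is unfinalised, then re-read the pointer, which is stable because it is written before the unlock. Roughly, under placeholder names:

#include <linux/compiler.h>     /* ACCESS_ONCE(); READ_ONCE() on newer kernels */
#include <linux/errno.h>

/* Sketch: lockless fast path over a pointer published under a lock
 * elsewhere. All names are placeholders, not the driver's real API.
 */
struct dev;
struct dom { struct dev *owner; };

/* Placeholder slow path: would set d->owner under d's lock before
 * unlocking (see init_once() sketch above).
 */
static int finalise_domain(struct dom *d, struct dev *smmu) { return 0; }

static int attach_sketch(struct dom *d, struct dev *smmu)
{
        struct dev *owner = ACCESS_ONCE(d->owner);      /* single ordered read */

        if (!owner) {
                int ret = finalise_domain(d, smmu);
                if (ret)
                        return ret;
                owner = d->owner;       /* stable: written before the unlock */
        }

        return owner == smmu ? 0 : -EINVAL;
}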
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                         return -ENOMEM;
 
                 arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
-                if (!pgtable_page_ctor(table)) {
-                        __free_page(table);
-                        return -ENOMEM;
-                }
                 pmd_populate(NULL, pmd, table);
                 arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
         }
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
         /* Mark all SMRn as invalid and all S2CRn as bypass */
         for (i = 0; i < smmu->num_mapping_groups; ++i) {
-                writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+                writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
                 writel_relaxed(S2CR_TYPE_BYPASS,
                                gr0_base + ARM_SMMU_GR0_S2CR(i));
         }
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                 dev_notice(smmu->dev,
                            "\tstream matching with %u register groups, mask 0x%x",
                            smmu->num_mapping_groups, mask);
+        } else {
+                smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+                                           ID0_NUMSIDB_MASK;
         }
 
         /* ID1 */
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
          * Stage-1 output limited by stage-2 input size due to pgd
          * allocation (PTRS_PER_PGD).
          */
+        if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
 #ifdef CONFIG_64BIT
         smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
 #else
         smmu->s1_output_size = min(32UL, size);
 #endif
+        } else {
+                smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
+                                             size);
+        }
 
         /* The stage-2 output mask is also applied for bypass */
         size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
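Part of the address-size corner cases: clamping the stage-1 output size to the stage-2 input size is only justified when nested translation is supported; otherwise the sensible bound is the CPU's physical address width (PHYS_MASK_SHIFT). A sketch of the bound selection, with placeholder constants standing in for VA_BITS and PHYS_MASK_SHIFT:

#include <linux/kernel.h>       /* min_t() */

/* Sketch only: pick the stage-1 output-size bound. */
static unsigned long s1_output_bits(bool nested, unsigned long ipa_bits)
{
        const unsigned long vaddr_bits = 48;    /* placeholder for VA_BITS */
        const unsigned long phys_bits = 48;     /* placeholder for PHYS_MASK_SHIFT */

        if (nested)
                return min_t(unsigned long, vaddr_bits, ipa_bits);
        return min_t(unsigned long, phys_bits, ipa_bits);
}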
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                 smmu->irqs[i] = irq;
         }
 
+        err = arm_smmu_device_cfg_probe(smmu);
+        if (err)
+                return err;
+
         i = 0;
         smmu->masters = RB_ROOT;
         while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
         }
         dev_notice(dev, "registered %d master devices\n", i);
 
-        err = arm_smmu_device_cfg_probe(smmu);
-        if (err)
-                goto out_put_masters;
-
         parse_driver_options(smmu);
 
         if (smmu->version > 1 &&
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 60ab474bfff3..06d268abe951 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void)
                                 andd->device_name);
                         continue;
                 }
-                acpi_bus_get_device(h, &adev);
-                if (!adev) {
+                if (acpi_bus_get_device(h, &adev)) {
                         pr_err("Failed to get device for ACPI object %s\n",
                                andd->device_name);
                         continue;
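One of the Coverity finds: acpi_bus_get_device() can fail before writing to its out-pointer, so testing adev, which may then be uninitialised, was unreliable; the fix tests the return code instead. The hazard in miniature, as a runnable stand-alone example with a hypothetical lookup():

#include <stdio.h>

/* Sketch: a lookup that only writes *out on success. Checking *out
 * after the call inspects an indeterminate value on failure; checking
 * the return code is the reliable test.
 */
static int lookup(int key, int *out)
{
        if (key < 0)
                return -1;      /* failure: *out left untouched */
        *out = key * 2;
        return 0;
}

int main(void)
{
        int val;                /* deliberately uninitialised */

        if (lookup(-5, &val)) { /* correct: test the return code */
                puts("lookup failed");
                return 1;
        }
        printf("val = %d\n", val);
        return 0;
}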
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 61d1dafa242d..56feed7cec15 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev)
         struct iommu_group *group = ERR_PTR(-ENODEV);
         struct pci_dev *pdev;
         const u32 *prop;
-        int ret, len;
+        int ret = 0, len;
 
         /*
          * For platform devices we allocate a separate group for
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev)
         if (IS_ERR(group))
                 return PTR_ERR(group);
 
-        ret = iommu_group_add_device(group, dev);
+        /*
+         * Check if device has already been added to an iommu group.
+         * Group could have already been created for a PCI device in
+         * the iommu_group_get_for_dev path.
+         */
+        if (!dev->iommu_group)
+                ret = iommu_group_add_device(group, dev);
 
         iommu_group_put(group);
         return ret;
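The PAMU warning came from adding a device to an iommu group twice, since iommu_group_get_for_dev() can already have attached PCI devices; guarding on dev->iommu_group (with ret initialised to 0 so the skip path still reports success) makes the call idempotent. The shape of the guard, as a small stand-alone example with invented names:

#include <stdio.h>

struct device { void *group; };         /* stand-in for dev->iommu_group */

/* Sketch: only register the device if it is not already in a group,
 * so a second call becomes a harmless no-op instead of a warning.
 */
static int add_to_group(struct device *dev, void *group)
{
        int ret = 0;                    /* skip path must still succeed */

        if (!dev->group) {
                dev->group = group;     /* placeholder for the real add */
                puts("added");
        }
        return ret;
}

int main(void)
{
        struct device dev = { 0 };
        int g;

        add_to_group(&dev, &g);
        add_to_group(&dev, &g);         /* second call: no double add */
        return 0;
}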
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ac4adb337038..0639b9274b11 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
  */
 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-        struct iommu_group *group = ERR_PTR(-EIO);
+        struct iommu_group *group;
         int ret;
 
         group = iommu_group_get(dev);
         if (group)
                 return group;
 
-        if (dev_is_pci(dev))
-                group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
+        if (!dev_is_pci(dev))
+                return ERR_PTR(-EINVAL);
+
+        group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
 
         if (IS_ERR(group))
                 return group;
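The robustness fix for iommu_group_get_for_dev(): previously a non-PCI device fell through carrying the ERR_PTR(-EIO) initialiser, and the rewrite instead returns -EINVAL explicitly before the PCI-only path runs, so every exit is either a valid group or a deliberate error. The guard-clause shape, as a runnable stand-alone example with hypothetical is_pci()/get_pci_group() helpers:

#include <stdio.h>

/* Sketch: replace "maybe-assigned then checked" flow with explicit
 * early returns, so no path can leak a sentinel result.
 */
static int is_pci(int dev) { return dev > 0; }          /* invented predicate */
static int get_pci_group(int dev) { return dev * 10; }  /* invented lookup */

static int get_group(int dev)
{
        if (!is_pci(dev))
                return -22;     /* -EINVAL: unsupported bus, stated up front */
        return get_pci_group(dev);
}

int main(void)
{
        printf("%d %d\n", get_group(3), get_group(-1));
        return 0;
}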