Diffstat (limited to 'drivers/iommu/arm-smmu.c')
 -rw-r--r--  drivers/iommu/arm-smmu.c | 220
 1 file changed, 129 insertions(+), 91 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a83cc2a2a2ca..60558f794922 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
  * - v7/v8 long-descriptor format
  * - Non-secure access to the SMMU
  * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 42-bit addressing (dependent on VA_BITS)
+ * - Up to 48-bit addressing (dependent on VA_BITS)
  * - Context fault reporting
  */

@@ -59,7 +59,7 @@

 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
-#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
+#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

 /*
  * SMMU global address space with conditional offset to access secure
@@ -224,7 +224,7 @@

 /* Translation context bank */
 #define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)
+#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

 #define ARM_SMMU_CB_SCTLR		0x0
 #define ARM_SMMU_CB_RESUME		0x8
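An aside on the pagesize-to-pgshift conversion above (an illustration, not part of the patch): an SMMU register page is either 4KB or 64KB, so storing the shift (12 or 16) instead of the byte size leaves every offset calculation equivalent while making the later ID1 size computation, size *= 2 << smmu->pgshift, read directly as "two pages per index". A minimal stand-alone sketch with assumed values:

    #include <assert.h>

    int main(void)
    {
        unsigned long pagesize = 65536; /* hypothetical 64KB SMMU pages */
        unsigned long pgshift  = 16;    /* the same quantity, expressed as a shift */
        unsigned long n = 3;            /* an arbitrary context bank index */

        /* ARM_SMMU_GR1(): GR1 sits one SMMU page above the base */
        assert(pagesize == (1UL << pgshift));

        /* ARM_SMMU_CB(): context bank n starts n pages into the CB region */
        assert(n * pagesize == n * (1UL << pgshift));

        /* ID1 parsing later in the patch: (pagesize << 1) == (2 << pgshift) */
        assert((pagesize << 1) == (2UL << pgshift));

        return 0;
    }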
@@ -326,6 +326,16 @@

 #define FSYNR0_WNR			(1 << 4)

+static int force_stage;
+module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_stage,
+	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+
+enum arm_smmu_arch_version {
+	ARM_SMMU_V1 = 1,
+	ARM_SMMU_V2,
+};
+
 struct arm_smmu_smr {
 	u8				idx;
 	u16				mask;
@@ -349,7 +359,7 @@ struct arm_smmu_device {

 	void __iomem			*base;
 	unsigned long			size;
-	unsigned long			pagesize;
+	unsigned long			pgshift;

 #define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
 #define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
@@ -360,7 +370,7 @@ struct arm_smmu_device {

 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
 	u32				options;
-	int				version;
+	enum arm_smmu_arch_version	version;

 	u32				num_context_banks;
 	u32				num_s2_context_banks;
@@ -370,8 +380,9 @@ struct arm_smmu_device {
 	u32				num_mapping_groups;
 	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

-	unsigned long			input_size;
+	unsigned long			s1_input_size;
 	unsigned long			s1_output_size;
+	unsigned long			s2_input_size;
 	unsigned long			s2_output_size;

 	u32				num_global_irqs;
@@ -426,17 +437,17 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
 	} while (arm_smmu_options[++i].opt);
 }

-static struct device *dev_get_master_dev(struct device *dev)
+static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
 		struct pci_bus *bus = to_pci_dev(dev)->bus;

 		while (!pci_is_root_bus(bus))
 			bus = bus->parent;
-		return bus->bridge->parent;
+		return bus->bridge->parent->of_node;
 	}

-	return dev;
+	return dev->of_node;
 }

 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
@@ -461,15 +472,17 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 }

 static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
+find_smmu_master_cfg(struct device *dev)
 {
-	struct arm_smmu_master *master;
+	struct arm_smmu_master_cfg *cfg = NULL;
+	struct iommu_group *group = iommu_group_get(dev);

-	if (dev_is_pci(dev))
-		return dev->archdata.iommu;
+	if (group) {
+		cfg = iommu_group_get_iommudata(group);
+		iommu_group_put(group);
+	}

-	master = find_smmu_master(smmu, dev->of_node);
-	return master ? &master->cfg : NULL;
+	return cfg;
 }

 static int insert_smmu_master(struct arm_smmu_device *smmu,
@@ -545,7 +558,7 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
 {
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master *master = NULL;
-	struct device_node *dev_node = dev_get_master_dev(dev)->of_node;
+	struct device_node *dev_node = dev_get_dev_node(dev);

 	spin_lock(&arm_smmu_devices_lock);
 	list_for_each_entry(smmu, &arm_smmu_devices, list) {
@@ -729,7 +742,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)

 	/* CBAR */
 	reg = cfg->cbar;
-	if (smmu->version == 1)
+	if (smmu->version == ARM_SMMU_V1)
 		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

 	/*
@@ -744,7 +757,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

-	if (smmu->version > 1) {
+	if (smmu->version > ARM_SMMU_V1) {
 		/* CBA2R */
 #ifdef CONFIG_64BIT
 		reg = CBA2R_RW64_64BIT;
@@ -755,7 +768,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 			gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));

 		/* TTBCR2 */
-		switch (smmu->input_size) {
+		switch (smmu->s1_input_size) {
 		case 32:
 			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
 			break;
@@ -817,14 +830,14 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 	 * TTBCR
 	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
 	 */
-	if (smmu->version > 1) {
+	if (smmu->version > ARM_SMMU_V1) {
 		if (PAGE_SIZE == SZ_4K)
 			reg = TTBCR_TG0_4K;
 		else
 			reg = TTBCR_TG0_64K;

 		if (!stage1) {
-			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+			reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;

 			switch (smmu->s2_output_size) {
 			case 32:
@@ -847,7 +860,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 				break;
 			}
 		} else {
-			reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
+			reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
 		}
 	} else {
 		reg = 0;
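For context on the T0SZ changes above (illustration only, not part of the patch): in the long-descriptor format, T0SZ encodes the translation region as 64 minus the number of input address bits, which is why the stage-2 TTBCR must be derived from the stage-2 input size rather than from the stage-1 output size. A small sketch with assumed widths:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical address widths, purely to show the arithmetic */
        unsigned long s1_input_size = 48; /* VA bits entering stage 1 */
        unsigned long s2_input_size = 40; /* IPA bits entering stage 2 */

        printf("stage-1 T0SZ = %lu\n", 64 - s1_input_size); /* prints 16 */
        printf("stage-2 T0SZ = %lu\n", 64 - s2_input_size); /* prints 24 */
        return 0;
    }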
@@ -914,7 +927,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		goto out_unlock;

 	cfg->cbndx = ret;
-	if (smmu->version == 1) {
+	if (smmu->version == ARM_SMMU_V1) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
 	} else {
@@ -1151,9 +1164,10 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

+	/* Devices in an IOMMU group may already be configured */
 	ret = arm_smmu_master_configure_smrs(smmu, cfg);
 	if (ret)
-		return ret;
+		return ret == -EEXIST ? 0 : ret;

 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx, s2cr;
@@ -1174,6 +1188,10 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

+	/* An IOMMU group is torn down by the first device to be removed */
+	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+		return;
+
 	/*
 	 * We *must* clear the S2CR first, because freeing the SMR means
 	 * that it can be re-allocated immediately.
@@ -1195,12 +1213,17 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct arm_smmu_device *smmu, *dom_smmu;
 	struct arm_smmu_master_cfg *cfg;

-	smmu = dev_get_master_dev(dev)->archdata.iommu;
+	smmu = find_smmu_for_device(dev);
 	if (!smmu) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}

+	if (dev->archdata.iommu) {
+		dev_err(dev, "already attached to IOMMU domain\n");
+		return -EEXIST;
+	}
+
 	/*
 	 * Sanity check the domain. We don't support domains across
 	 * different SMMUs.
@@ -1223,11 +1246,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	}

 	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
+	cfg = find_smmu_master_cfg(dev);
 	if (!cfg)
 		return -ENODEV;

-	return arm_smmu_domain_add_master(smmu_domain, cfg);
+	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+	if (!ret)
+		dev->archdata.iommu = domain;
+	return ret;
 }

 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1235,9 +1261,12 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_master_cfg *cfg;

-	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
-	if (cfg)
-		arm_smmu_domain_remove_master(smmu_domain, cfg);
+	cfg = find_smmu_master_cfg(dev);
+	if (!cfg)
+		return;
+
+	dev->archdata.iommu = NULL;
+	arm_smmu_domain_remove_master(smmu_domain, cfg);
 }

 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
@@ -1379,6 +1408,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
 					      prot, stage);
 		phys += next - addr;
+		pfn = __phys_to_pfn(phys);
 	} while (pmd++, addr = next, addr < end);

 	return ret;
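A note on the one-line fix above (the sketch below is an illustration with made-up addresses, not driver code): the loop advances phys after each PMD-sized step, but the pfn handed to arm_smmu_alloc_init_pte() was never recomputed, so every step after the first would have mapped the first step's page frames again. Keeping pfn in step with phys looks like this in isolation:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define STEP       (2UL << 20) /* pretend each iteration covers 2MB */

    int main(void)
    {
        unsigned long addr = 0x10000000UL, end = addr + 3 * STEP;
        unsigned long phys = 0x80000000UL;
        unsigned long pfn  = phys >> PAGE_SHIFT;
        unsigned long next;

        do {
            next = addr + STEP;
            printf("map iova %#lx -> pfn %#lx\n", addr, pfn);
            phys += next - addr;
            pfn = phys >> PAGE_SHIFT; /* the fix: recompute pfn from the advanced phys */
        } while (addr = next, addr < end);

        return 0;
    }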
@@ -1431,9 +1461,11 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,

 	if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
 		stage = 2;
+		input_mask = (1ULL << smmu->s2_input_size) - 1;
 		output_mask = (1ULL << smmu->s2_output_size) - 1;
 	} else {
 		stage = 1;
+		input_mask = (1ULL << smmu->s1_input_size) - 1;
 		output_mask = (1ULL << smmu->s1_output_size) - 1;
 	}

@@ -1443,7 +1475,6 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (size & ~PAGE_MASK)
 		return -EINVAL;

-	input_mask = (1ULL << smmu->input_size) - 1;
 	if ((phys_addr_t)iova & ~input_mask)
 		return -ERANGE;

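For reference (not part of the change): with per-stage input sizes, the IOVA range check is plain mask arithmetic. Assuming a hypothetical 40-bit stage-2 input size, any IOVA with a bit set above bit 39 now trips the -ERANGE check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long s2_input_size = 40;                  /* assumed IPA width */
        uint64_t input_mask = (1ULL << s2_input_size) - 1; /* 0xffffffffff */
        uint64_t iova = 1ULL << 40;                        /* first address past the limit */

        if (iova & ~input_mask)
            printf("iova %#llx is out of range (-ERANGE)\n",
                   (unsigned long long)iova);
        return 0;
    }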
@@ -1526,20 +1557,19 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }

-static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
-				   unsigned long cap)
+static bool arm_smmu_capable(enum iommu_cap cap)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	u32 features = smmu ? smmu->features : 0;
-
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
-		return features & ARM_SMMU_FEAT_COHERENT_WALK;
+		/*
+		 * Return true here as the SMMU can always send out coherent
+		 * requests.
+		 */
+		return true;
 	case IOMMU_CAP_INTR_REMAP:
-		return 1; /* MSIs are just memory writes */
+		return true; /* MSIs are just memory writes */
 	default:
-		return 0;
+		return false;
 	}
 }

@@ -1549,17 +1579,19 @@ static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
 	return 0; /* Continue walking */
 }

+static void __arm_smmu_release_pci_iommudata(void *data)
+{
+	kfree(data);
+}
+
 static int arm_smmu_add_device(struct device *dev)
 {
 	struct arm_smmu_device *smmu;
+	struct arm_smmu_master_cfg *cfg;
 	struct iommu_group *group;
+	void (*releasefn)(void *) = NULL;
 	int ret;

-	if (dev->archdata.iommu) {
-		dev_warn(dev, "IOMMU driver already assigned to device\n");
-		return -EINVAL;
-	}
-
 	smmu = find_smmu_for_device(dev);
 	if (!smmu)
 		return -ENODEV;
@@ -1571,7 +1603,6 @@ static int arm_smmu_add_device(struct device *dev)
 	}

 	if (dev_is_pci(dev)) {
-		struct arm_smmu_master_cfg *cfg;
 		struct pci_dev *pdev = to_pci_dev(dev);

 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
@@ -1587,11 +1618,20 @@ static int arm_smmu_add_device(struct device *dev)
 		 */
 		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
 				       &cfg->streamids[0]);
-		dev->archdata.iommu = cfg;
+		releasefn = __arm_smmu_release_pci_iommudata;
 	} else {
-		dev->archdata.iommu = smmu;
+		struct arm_smmu_master *master;
+
+		master = find_smmu_master(smmu, dev->of_node);
+		if (!master) {
+			ret = -ENODEV;
+			goto out_put_group;
+		}
+
+		cfg = &master->cfg;
 	}

+	iommu_group_set_iommudata(group, cfg, releasefn);
 	ret = iommu_group_add_device(group, dev);

 out_put_group:
@@ -1601,14 +1641,11 @@ out_put_group:

 static void arm_smmu_remove_device(struct device *dev)
 {
-	if (dev_is_pci(dev))
-		kfree(dev->archdata.iommu);
-
-	dev->archdata.iommu = NULL;
 	iommu_group_remove_device(dev);
 }

 static const struct iommu_ops arm_smmu_ops = {
+	.capable		= arm_smmu_capable,
 	.domain_init		= arm_smmu_domain_init,
 	.domain_destroy		= arm_smmu_domain_destroy,
 	.attach_dev		= arm_smmu_attach_dev,
@@ -1616,7 +1653,6 @@ static const struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
-	.domain_has_cap		= arm_smmu_domain_has_cap,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
 	.pgsize_bitmap		= (SECTION_SIZE |
@@ -1702,10 +1738,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	u32 id;

 	dev_notice(smmu->dev, "probing hardware configuration...\n");
-
-	/* Primecell ID */
-	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
-	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
 	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

 	/* ID0 */
@@ -1716,6 +1748,13 @@
 		return -ENODEV;
 	}
 #endif
+
+	/* Restrict available stages based on module parameter */
+	if (force_stage == 1)
+		id &= ~(ID0_S2TS | ID0_NTS);
+	else if (force_stage == 2)
+		id &= ~(ID0_S1TS | ID0_NTS);
+
 	if (id & ID0_S1TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
 		dev_notice(smmu->dev, "\tstage 1 translation\n");
@@ -1732,8 +1771,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	}

 	if (!(smmu->features &
-		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
-		 ARM_SMMU_FEAT_TRANS_NESTED))) {
+		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
 		dev_err(smmu->dev, "\tno translation support!\n");
 		return -ENODEV;
 	}
@@ -1779,12 +1817,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)

 	/* ID1 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
-	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

 	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 <<
 		(((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
-	size *= (smmu->pagesize << 1);
+	size *= 2 << smmu->pgshift;
 	if (smmu->size != size)
 		dev_warn(smmu->dev,
 			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
@@ -1803,28 +1841,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	/* ID2 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
 	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+	smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);

-	/*
-	 * Stage-1 output limited by stage-2 input size due to pgd
-	 * allocation (PTRS_PER_PGD).
-	 */
-	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+	/* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
 #ifdef CONFIG_64BIT
-		smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
+	smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
 #else
-		smmu->s1_output_size = min(32UL, size);
+	smmu->s2_input_size = min(32UL, size);
 #endif
-	} else {
-		smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT,
-					     size);
-	}

 	/* The stage-2 output mask is also applied for bypass */
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
 	smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);

-	if (smmu->version == 1) {
-		smmu->input_size = 32;
+	if (smmu->version == ARM_SMMU_V1) {
+		smmu->s1_input_size = 32;
 	} else {
 #ifdef CONFIG_64BIT
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
@@ -1832,7 +1863,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 #else
 		size = 32;
 #endif
-		smmu->input_size = size;
+		smmu->s1_input_size = size;

 		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
 		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
@@ -1843,15 +1874,30 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		}
 	}

-	dev_notice(smmu->dev,
-		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
-		   smmu->input_size, smmu->s1_output_size,
-		   smmu->s2_output_size);
+	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+			   smmu->s1_input_size, smmu->s1_output_size);
+
+	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+			   smmu->s2_input_size, smmu->s2_output_size);
+
 	return 0;
 }

+static const struct of_device_id arm_smmu_of_match[] = {
+	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
+	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
+	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
+	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
+	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id;
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
@@ -1866,6 +1912,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	}
 	smmu->dev = dev;

+	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+	smmu->version = (enum arm_smmu_arch_version)of_id->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	smmu->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(smmu->base))
@@ -1930,7 +1979,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)

 	parse_driver_options(smmu);

-	if (smmu->version > 1 &&
+	if (smmu->version > ARM_SMMU_V1 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
@@ -2011,17 +2060,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	return 0;
 }

-#ifdef CONFIG_OF
-static struct of_device_id arm_smmu_of_match[] = {
-	{ .compatible = "arm,smmu-v1", },
-	{ .compatible = "arm,smmu-v2", },
-	{ .compatible = "arm,mmu-400", },
-	{ .compatible = "arm,mmu-500", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-#endif
-
 static struct platform_driver arm_smmu_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
