author		Will Deacon <will@kernel.org>	2019-07-02 11:44:41 -0400
committer	Will Deacon <will@kernel.org>	2019-07-29 12:22:57 -0400
commit		abfd6fe0cd535d31ee83b668be6eb59ce6a8469d (patch)
tree		3434e458e2191d71cb8d6211209044d8e3560bcd
parent		10b7a7d912697afd681a0bcfced9e05543aded35 (diff)
iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page()
The ->tlb_add_flush() callback in the io-pgtable API now looks a bit
silly:
- It takes a size and a granule, which are always the same
- It takes a 'bool leaf', which is always true
- It only ever flushes a single page
With that in mind, replace it with an optional ->tlb_add_page() callback
that drops the useless parameters.
Signed-off-by: Will Deacon <will@kernel.org>
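The signature change at the heart of the patch, condensed from the include/linux/io-pgtable.h hunk below:

	/* Old hook: size == granule and leaf == true at every call site. */
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);

	/* New hook: optional, and takes only the parameters that matter. */
	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);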
 drivers/gpu/drm/panfrost/panfrost_mmu.c |  5
 drivers/iommu/arm-smmu-v3.c             |  8
 drivers/iommu/arm-smmu.c                | 88
 drivers/iommu/io-pgtable-arm-v7s.c      | 12
 drivers/iommu/io-pgtable-arm.c          | 11
 drivers/iommu/ipmmu-vmsa.c              |  7
 drivers/iommu/msm_iommu.c               |  7
 drivers/iommu/mtk_iommu.c               |  8
 drivers/iommu/qcom_iommu.c              |  8
 include/linux/io-pgtable.h              | 22
 10 files changed, 105 insertions(+), 71 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 651858147bd6..ff9af320cacc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -247,10 +247,6 @@ static void mmu_tlb_inv_context_s1(void *cookie)
 	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
 }
 
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-				     size_t granule, bool leaf, void *cookie)
-{}
-
 static void mmu_tlb_sync_context(void *cookie)
 {
 	//struct panfrost_device *pfdev = cookie;
@@ -273,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all	= mmu_tlb_inv_context_s1,
 	.tlb_flush_walk	= mmu_tlb_flush_walk,
 	.tlb_flush_leaf	= mmu_tlb_flush_leaf,
-	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
 	.tlb_sync	= mmu_tlb_sync_context,
 };
 
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 79819b003b07..98c90a1b4b22 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1603,6 +1603,12 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
+static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule,
+					 void *cookie)
+{
+	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
@@ -1627,7 +1633,7 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
 	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
 };
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e9f01b860ae3..f056164a94b0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -248,10 +248,16 @@ enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_BYPASS,
 };
 
+struct arm_smmu_flush_ops {
+	struct iommu_flush_ops		tlb;
+	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
+			      bool leaf, void *cookie);
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
-	const struct iommu_flush_ops	*tlb_ops;
+	const struct arm_smmu_flush_ops	*flush_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	bool				non_strict;
@@ -551,42 +557,62 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
-	smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie);
-	smmu_domain->tlb_ops->tlb_sync(cookie);
+	ops->tlb_inv_range(iova, size, granule, false, cookie);
+	ops->tlb.tlb_sync(cookie);
 }
 
 static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+	ops->tlb_inv_range(iova, size, granule, true, cookie);
+	ops->tlb.tlb_sync(cookie);
+}
+
+static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
+				  void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
-	smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie);
-	smmu_domain->tlb_ops->tlb_sync(cookie);
+	ops->tlb_inv_range(iova, granule, granule, true, cookie);
 }
 
-static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_context,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_range_nosync,
 };
 
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_context,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_range_nosync,
 };
 
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_vmid,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_vmid,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_vmid_nosync,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -866,7 +892,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			ias = min(ias, 32UL);
 			oas = min(oas, 32UL);
 		}
-		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -886,9 +912,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			oas = min(oas, 40UL);
 		}
 		if (smmu->version == ARM_SMMU_V2)
-			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
 		else
-			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
 		break;
 	default:
 		ret = -EINVAL;
@@ -917,7 +943,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.ias		= ias,
 		.oas		= oas,
 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
-		.tlb		= smmu_domain->tlb_ops,
+		.tlb		= &smmu_domain->flush_ops->tlb,
 		.iommu_dev	= smmu->dev,
 	};
 
@@ -1346,9 +1372,9 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops) {
+	if (smmu_domain->flush_ops) {
 		arm_smmu_rpm_get(smmu);
-		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
 		arm_smmu_rpm_put(smmu);
 	}
 }
@@ -1359,9 +1385,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops) {
+	if (smmu_domain->flush_ops) {
 		arm_smmu_rpm_get(smmu);
-		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+		smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
 		arm_smmu_rpm_put(smmu);
 	}
 }
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 8d4914fe73bc..b3f975c95f76 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -584,7 +584,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
 		return __arm_v7s_unmap(data, iova, size, 2, tablep);
 	}
 
-	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+	io_pgtable_tlb_add_page(&data->iop, iova, size);
 	return size;
 }
 
@@ -647,8 +647,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 			 */
 			smp_wmb();
 		} else {
-			io_pgtable_tlb_add_flush(iop, iova, blk_size,
-						 blk_size, true);
+			io_pgtable_tlb_add_page(iop, iova, blk_size);
 		}
 		iova += blk_size;
 	}
@@ -809,10 +808,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
 {
-	dummy_tlb_flush(iova, size, granule, cookie);
+	dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
 static void dummy_tlb_sync(void *cookie)
@@ -824,7 +822,7 @@ static const struct iommu_flush_ops dummy_tlb_ops = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
 	.tlb_flush_walk	= dummy_tlb_flush,
 	.tlb_flush_leaf	= dummy_tlb_flush,
-	.tlb_add_flush	= dummy_tlb_add_flush,
+	.tlb_add_page	= dummy_tlb_add_page,
 	.tlb_sync	= dummy_tlb_sync,
 };
 
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b58338c86323..a5c0db01533e 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -582,7 +582,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
 		tablep = iopte_deref(pte, data);
 	} else if (unmap_idx >= 0) {
-		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+		io_pgtable_tlb_add_page(&data->iop, iova, size);
 		return size;
 	}
 
@@ -623,7 +623,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		 */
 		smp_wmb();
 	} else {
-		io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+		io_pgtable_tlb_add_page(iop, iova, size);
 	}
 
 	return size;
@@ -1075,10 +1075,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
 {
-	dummy_tlb_flush(iova, size, granule, cookie);
+	dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
 static void dummy_tlb_sync(void *cookie)
@@ -1090,7 +1089,7 @@ static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
 	.tlb_flush_walk	= dummy_tlb_flush,
 	.tlb_flush_leaf	= dummy_tlb_flush,
-	.tlb_add_flush	= dummy_tlb_add_flush,
+	.tlb_add_page	= dummy_tlb_add_page,
 	.tlb_sync	= dummy_tlb_sync,
 };
 
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9cc7bcb7e39d..c4da271af90e 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -367,17 +367,10 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
 	ipmmu_tlb_flush_all(cookie);
 }
 
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
-				size_t granule, bool leaf, void *cookie)
-{
-	/* The hardware doesn't support selective TLB flush. */
-}
-
 static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all	= ipmmu_tlb_flush_all,
 	.tlb_flush_walk	= ipmmu_tlb_flush,
 	.tlb_flush_leaf	= ipmmu_tlb_flush,
-	.tlb_add_flush	= ipmmu_tlb_add_flush,
 	.tlb_sync	= ipmmu_tlb_flush_all,
 };
 
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 64132093751a..2cd83295a841 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -192,11 +192,16 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size,
 	__flush_iotlb_sync(cookie);
 }
 
+static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
+{
+	__flush_iotlb_range(iova, granule, granule, true, cookie);
+}
+
 static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_all	= __flush_iotlb,
 	.tlb_flush_walk	= __flush_iotlb_walk,
 	.tlb_flush_leaf	= __flush_iotlb_leaf,
-	.tlb_add_flush	= __flush_iotlb_range,
+	.tlb_add_page	= __flush_iotlb_page,
 	.tlb_sync	= __flush_iotlb_sync,
 };
 
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 85a7176bf9ae..a0b4b4dc4b90 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -202,11 +202,17 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
 	mtk_iommu_tlb_sync(cookie);
 }
 
+static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule,
+					    void *cookie)
+{
+	mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+}
+
 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_all	= mtk_iommu_tlb_flush_all,
 	.tlb_flush_walk	= mtk_iommu_tlb_flush_walk,
 	.tlb_flush_leaf	= mtk_iommu_tlb_flush_leaf,
-	.tlb_add_flush	= mtk_iommu_tlb_add_flush_nosync,
+	.tlb_add_page	= mtk_iommu_tlb_flush_page_nosync,
 	.tlb_sync	= mtk_iommu_tlb_sync,
 };
 
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 643079e52e69..7d8411dee4cf 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -178,11 +178,17 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
 	qcom_iommu_tlb_sync(cookie);
 }
 
+static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule,
+				    void *cookie)
+{
+	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
 static const struct iommu_flush_ops qcom_flush_ops = {
 	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
 	.tlb_flush_walk	= qcom_iommu_tlb_flush_walk,
 	.tlb_flush_leaf	= qcom_iommu_tlb_flush_leaf,
-	.tlb_add_flush	= qcom_iommu_tlb_inv_range_nosync,
+	.tlb_add_page	= qcom_iommu_tlb_add_page,
 	.tlb_sync	= qcom_iommu_tlb_sync,
 };
 
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 0618aac59e74..99e04bd2baa1 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -25,12 +25,11 @@ enum io_pgtable_fmt {
  *                  address range.
  * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
  *                  address range.
- * @tlb_add_flush:  Optional callback to queue up leaf TLB invalidation for a
- *                  virtual address range. This function exists purely as an
- *                  optimisation for IOMMUs that cannot batch TLB invalidation
- *                  operations efficiently and are therefore better suited to
- *                  issuing them early rather than deferring them until
- *                  iommu_tlb_sync().
+ * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
+ *                  single page. This function exists purely as an optimisation
+ *                  for IOMMUs that cannot batch TLB invalidation operations
+ *                  efficiently and are therefore better suited to issuing them
+ *                  early rather than deferring them until iommu_tlb_sync().
  * @tlb_sync:       Ensure any queued TLB invalidation has taken effect, and
  *                  any corresponding page table updates are visible to the
  *                  IOMMU.
@@ -44,8 +43,7 @@ struct iommu_flush_ops {
 			       void *cookie);
 	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
 			       void *cookie);
-	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
-			      bool leaf, void *cookie);
+	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
 	void (*tlb_sync)(void *cookie);
 };
 
@@ -212,10 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
 	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
 }
 
-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
-		unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
+			size_t granule)
 {
-	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+	if (iop->cfg.tlb->tlb_add_page)
+		iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
 }
 
 static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
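For driver authors, the conversion pattern is the same thin wrapper used by the qcom, msm and mtk hunks above. A minimal sketch with hypothetical my_iommu_* names (not part of this patch):

	#include <linux/io-pgtable.h>

	/* Driver-internal range invalidation, unchanged by this patch. */
	static void my_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
						  size_t granule, bool leaf,
						  void *cookie)
	{
		/* issue one invalidation command per granule to the hardware */
	}

	/* ->tlb_add_page() is just the single-page case: size == granule, leaf == true. */
	static void my_iommu_tlb_add_page(unsigned long iova, size_t granule,
					  void *cookie)
	{
		my_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
	}

	static const struct iommu_flush_ops my_iommu_flush_ops = {
		/* .tlb_flush_all/.tlb_flush_walk/.tlb_flush_leaf/.tlb_sync as before */
		.tlb_add_page	= my_iommu_tlb_add_page,	/* optional */
	};

Because io_pgtable_tlb_add_page() checks the pointer before calling through, drivers that cannot usefully invalidate a single page early (see the ipmmu-vmsa hunk above) simply omit .tlb_add_page altogether.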