aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTomasz Figa <tfiga@chromium.org>2018-03-23 03:38:08 -0400
committerJoerg Roedel <jroedel@suse.de>2018-03-29 06:22:26 -0400
commitf2e3a5f557ad27f6a6f447717090a39cea238d6a (patch)
tree9566310cadbebebfe40ea424e3d0712a088a2009
parentbf2a5e717a47d042ca5f9f438cf186633f23033b (diff)
iommu/rockchip: Control clocks needed to access the IOMMU
Current code relies on master driver enabling necessary clocks before IOMMU is accessed, however there are cases when the IOMMU should be accessed while the master is not running yet, for example allocating V4L2 videobuf2 buffers, which is done by the VB2 framework using DMA mapping API and doesn't engage the master driver at all. This patch fixes the problem by letting clocks needed for IOMMU operation be listed in Device Tree and making the driver enable them for the time of accessing the hardware. Signed-off-by: Tomasz Figa <tfiga@chromium.org> Signed-off-by: Jeffy Chen <jeffy.chen@rock-chips.com> Acked-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/rockchip-iommu.c50
1 file changed, 47 insertions, 3 deletions
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c3df53e480ea..50ab9fd4eeb0 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,6 +4,7 @@
4 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
5 */ 5 */
6 6
7#include <linux/clk.h>
7#include <linux/compiler.h> 8#include <linux/compiler.h>
8#include <linux/delay.h> 9#include <linux/delay.h>
9#include <linux/device.h> 10#include <linux/device.h>
@@ -87,10 +88,17 @@ struct rk_iommu_domain {
87 struct iommu_domain domain; 88 struct iommu_domain domain;
88}; 89};
89 90
91/* list of clocks required by IOMMU */
92static const char * const rk_iommu_clocks[] = {
93 "aclk", "iface",
94};
95
90struct rk_iommu { 96struct rk_iommu {
91 struct device *dev; 97 struct device *dev;
92 void __iomem **bases; 98 void __iomem **bases;
93 int num_mmu; 99 int num_mmu;
100 struct clk_bulk_data *clocks;
101 int num_clocks;
94 bool reset_disabled; 102 bool reset_disabled;
95 struct iommu_device iommu; 103 struct iommu_device iommu;
96 struct list_head node; /* entry in rk_iommu_domain.iommus */ 104 struct list_head node; /* entry in rk_iommu_domain.iommus */
@@ -506,6 +514,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
506 irqreturn_t ret = IRQ_NONE; 514 irqreturn_t ret = IRQ_NONE;
507 int i; 515 int i;
508 516
517 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
518
509 for (i = 0; i < iommu->num_mmu; i++) { 519 for (i = 0; i < iommu->num_mmu; i++) {
510 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); 520 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
511 if (int_status == 0) 521 if (int_status == 0)
@@ -552,6 +562,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
552 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); 562 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
553 } 563 }
554 564
565 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
566
555 return ret; 567 return ret;
556} 568}
557 569
@@ -594,7 +606,9 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
594 list_for_each(pos, &rk_domain->iommus) { 606 list_for_each(pos, &rk_domain->iommus) {
595 struct rk_iommu *iommu; 607 struct rk_iommu *iommu;
596 iommu = list_entry(pos, struct rk_iommu, node); 608 iommu = list_entry(pos, struct rk_iommu, node);
609 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
597 rk_iommu_zap_lines(iommu, iova, size); 610 rk_iommu_zap_lines(iommu, iova, size);
611 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
598 } 612 }
599 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); 613 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
600} 614}
@@ -823,10 +837,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
823 if (!iommu) 837 if (!iommu)
824 return 0; 838 return 0;
825 839
826 ret = rk_iommu_enable_stall(iommu); 840 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
827 if (ret) 841 if (ret)
828 return ret; 842 return ret;
829 843
844 ret = rk_iommu_enable_stall(iommu);
845 if (ret)
846 goto out_disable_clocks;
847
830 ret = rk_iommu_force_reset(iommu); 848 ret = rk_iommu_force_reset(iommu);
831 if (ret) 849 if (ret)
832 goto out_disable_stall; 850 goto out_disable_stall;
@@ -852,6 +870,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
852 870
853out_disable_stall: 871out_disable_stall:
854 rk_iommu_disable_stall(iommu); 872 rk_iommu_disable_stall(iommu);
873out_disable_clocks:
874 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
855 return ret; 875 return ret;
856} 876}
857 877
@@ -873,6 +893,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
873 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); 893 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
874 894
875 /* Ignore error while disabling, just keep going */ 895 /* Ignore error while disabling, just keep going */
896 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
876 rk_iommu_enable_stall(iommu); 897 rk_iommu_enable_stall(iommu);
877 rk_iommu_disable_paging(iommu); 898 rk_iommu_disable_paging(iommu);
878 for (i = 0; i < iommu->num_mmu; i++) { 899 for (i = 0; i < iommu->num_mmu; i++) {
@@ -880,6 +901,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
880 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); 901 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
881 } 902 }
882 rk_iommu_disable_stall(iommu); 903 rk_iommu_disable_stall(iommu);
904 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
883 905
884 iommu->domain = NULL; 906 iommu->domain = NULL;
885 907
@@ -1172,15 +1194,37 @@ static int rk_iommu_probe(struct platform_device *pdev)
1172 iommu->reset_disabled = device_property_read_bool(dev, 1194 iommu->reset_disabled = device_property_read_bool(dev,
1173 "rockchip,disable-mmu-reset"); 1195 "rockchip,disable-mmu-reset");
1174 1196
1175 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); 1197 iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1198 iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1199 sizeof(*iommu->clocks), GFP_KERNEL);
1200 if (!iommu->clocks)
1201 return -ENOMEM;
1202
1203 for (i = 0; i < iommu->num_clocks; ++i)
1204 iommu->clocks[i].id = rk_iommu_clocks[i];
1205
1206 err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
1207 if (err)
1208 return err;
1209
1210 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1176 if (err) 1211 if (err)
1177 return err; 1212 return err;
1178 1213
1214 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1215 if (err)
1216 goto err_unprepare_clocks;
1217
1179 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); 1218 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1180 err = iommu_device_register(&iommu->iommu); 1219 err = iommu_device_register(&iommu->iommu);
1181 if (err) 1220 if (err)
1182 iommu_device_sysfs_remove(&iommu->iommu); 1221 goto err_remove_sysfs;
1183 1222
1223 return 0;
1224err_remove_sysfs:
1225 iommu_device_sysfs_remove(&iommu->iommu);
1226err_unprepare_clocks:
1227 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1184 return err; 1228 return err;
1185} 1229}
1186 1230