author     Jeffy Chen <jeffy.chen@rock-chips.com>  2018-03-23 03:38:13 -0400
committer  Joerg Roedel <jroedel@suse.de>          2018-03-29 06:22:28 -0400
commit     0f181d3cf7d984b1af33d34557a8285852b0d3ec
tree       b3b25569d9d99dedfa600cf5900ba8618367e496
parent     4d88a8a4c345cd16f634df855148cfb2a59a204a
iommu/rockchip: Add runtime PM support
When the power domain is powered off, the IOMMU cannot be accessed and
register programming must be deferred until the power domain becomes
enabled.

Add runtime PM support, and use a runtime PM device link from the IOMMU
to the master to enable and disable the IOMMU.

Signed-off-by: Jeffy Chen <jeffy.chen@rock-chips.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
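[Editor's note] The mechanism underlying this patch is device_link_add() with
DL_FLAG_PM_RUNTIME: the master (consumer) is linked to the IOMMU (supplier),
so runtime-resuming the master also resumes the IOMMU, and the IOMMU may only
runtime-suspend once all of its masters have. A minimal sketch of the
consumer side of such a link, using a hypothetical master driver and an
assumed way of locating the IOMMU device (neither is taken from this patch):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_master_probe(struct platform_device *pdev)
    {
            struct device *master = &pdev->dev;
            /* Stand-in lookup; a real driver resolves its IOMMU from DT. */
            struct device *iommu_dev = master->parent;
            struct device_link *link;

            /*
             * consumer = master, supplier = IOMMU. With DL_FLAG_PM_RUNTIME,
             * pm_runtime_get_sync() on the master implicitly powers up the
             * IOMMU before the master starts issuing DMA through it.
             */
            link = device_link_add(master, iommu_dev, DL_FLAG_PM_RUNTIME);
            if (!link)
                    return -ENODEV;

            pm_runtime_enable(master);
            return 0;
    }

On the IOMMU side, pm_runtime_get_if_in_use() serves as the guard throughout
the patch: it takes a reference only if the device is already runtime-active,
returning 0 (or a negative error) otherwise, which lets the IRQ handler, the
TLB zap path, and detach skip register access entirely while the power domain
is off.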
 drivers/iommu/rockchip-iommu.c | 181
 1 file changed, 129 insertions(+), 52 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 0ce5e8a0658c..9f6f74689464 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -22,6 +22,7 @@
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -106,6 +107,7 @@ struct rk_iommu {
 };
 
 struct rk_iommudata {
+	struct device_link *link; /* runtime PM link from IOMMU to master */
 	struct rk_iommu *iommu;
 };
 
@@ -520,7 +522,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 	irqreturn_t ret = IRQ_NONE;
 	int i;
 
-	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+	if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
+		return 0;
+
+	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+		goto out;
 
 	for (i = 0; i < iommu->num_mmu; i++) {
 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
@@ -570,6 +576,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 
 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
 
+out:
+	pm_runtime_put(iommu->dev);
 	return ret;
 }
 
@@ -611,10 +619,17 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_for_each(pos, &rk_domain->iommus) {
 		struct rk_iommu *iommu;
+
 		iommu = list_entry(pos, struct rk_iommu, node);
-		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-		rk_iommu_zap_lines(iommu, iova, size);
-		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+		/* Only zap TLBs of IOMMUs that are powered on. */
+		if (pm_runtime_get_if_in_use(iommu->dev)) {
+			WARN_ON(clk_bulk_enable(iommu->num_clocks,
+						iommu->clocks));
+			rk_iommu_zap_lines(iommu, iova, size);
+			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+			pm_runtime_put(iommu->dev);
+		}
 	}
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
@@ -817,22 +832,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 	return data ? data->iommu : NULL;
 }
 
-static int rk_iommu_attach_device(struct iommu_domain *domain,
-				  struct device *dev)
+/* Must be called with iommu powered on and attached */
+static void rk_iommu_disable(struct rk_iommu *iommu)
 {
-	struct rk_iommu *iommu;
+	int i;
+
+	/* Ignore error while disabling, just keep going */
+	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+	rk_iommu_enable_stall(iommu);
+	rk_iommu_disable_paging(iommu);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+	}
+	rk_iommu_disable_stall(iommu);
+	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+}
+
+/* Must be called with iommu powered on and attached */
+static int rk_iommu_enable(struct rk_iommu *iommu)
+{
+	struct iommu_domain *domain = iommu->domain;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	int ret, i;
 
-	/*
-	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
-	 * Such a device does not belong to an iommu group.
-	 */
-	iommu = rk_iommu_from_dev(dev);
-	if (!iommu)
-		return 0;
-
 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
 	if (ret)
 		return ret;
@@ -845,8 +868,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	if (ret)
 		goto out_disable_stall;
 
-	iommu->domain = domain;
-
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
 			       rk_domain->dt_dma);
@@ -855,14 +876,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	}
 
 	ret = rk_iommu_enable_paging(iommu);
-	if (ret)
-		goto out_disable_stall;
-
-	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
-	list_add_tail(&iommu->node, &rk_domain->iommus);
-	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
-	dev_dbg(dev, "Attached to iommu domain\n");
 
 out_disable_stall:
 	rk_iommu_disable_stall(iommu);
@@ -877,31 +890,71 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
-	int i;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
 	iommu = rk_iommu_from_dev(dev);
 	if (!iommu)
 		return;
 
+	dev_dbg(dev, "Detaching from iommu domain\n");
+
+	/* iommu already detached */
+	if (iommu->domain != domain)
+		return;
+
+	iommu->domain = NULL;
+
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_del_init(&iommu->node);
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-	/* Ignore error while disabling, just keep going */
-	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-	rk_iommu_enable_stall(iommu);
-	rk_iommu_disable_paging(iommu);
-	for (i = 0; i < iommu->num_mmu; i++) {
-		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
-		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+	if (pm_runtime_get_if_in_use(iommu->dev)) {
+		rk_iommu_disable(iommu);
+		pm_runtime_put(iommu->dev);
 	}
-	rk_iommu_disable_stall(iommu);
-	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+}
 
-	iommu->domain = NULL;
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+		struct device *dev)
+{
+	struct rk_iommu *iommu;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+	unsigned long flags;
+	int ret;
 
-	dev_dbg(dev, "Detached from iommu domain\n");
+	/*
+	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
+	 * Such a device does not belong to an iommu group.
+	 */
+	iommu = rk_iommu_from_dev(dev);
+	if (!iommu)
+		return 0;
+
+	dev_dbg(dev, "Attaching to iommu domain\n");
+
+	/* iommu already attached */
+	if (iommu->domain == domain)
+		return 0;
+
+	if (iommu->domain)
+		rk_iommu_detach_device(iommu->domain, dev);
+
+	iommu->domain = domain;
+
+	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	list_add_tail(&iommu->node, &rk_domain->iommus);
+	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+	if (!pm_runtime_get_if_in_use(iommu->dev))
+		return 0;
+
+	ret = rk_iommu_enable(iommu);
+	if (ret)
+		rk_iommu_detach_device(iommu->domain, dev);
+
+	pm_runtime_put(iommu->dev);
+
+	return ret;
 }
 
 static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
@@ -989,17 +1042,21 @@ static int rk_iommu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
 	struct rk_iommu *iommu;
+	struct rk_iommudata *data;
 
-	iommu = rk_iommu_from_dev(dev);
-	if (!iommu)
+	data = dev->archdata.iommu;
+	if (!data)
 		return -ENODEV;
 
+	iommu = rk_iommu_from_dev(dev);
+
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 	iommu_group_put(group);
 
 	iommu_device_link(&iommu->iommu, dev);
+	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
 
 	return 0;
 }
@@ -1007,9 +1064,11 @@ static int rk_iommu_add_device(struct device *dev)
 static void rk_iommu_remove_device(struct device *dev)
 {
 	struct rk_iommu *iommu;
+	struct rk_iommudata *data = dev->archdata.iommu;
 
 	iommu = rk_iommu_from_dev(dev);
 
+	device_link_del(data->link);
 	iommu_device_unlink(&iommu->iommu, dev);
 	iommu_group_remove_device(dev);
 }
@@ -1135,6 +1194,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
 
+	pm_runtime_enable(dev);
+
 	return 0;
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
@@ -1145,21 +1206,36 @@ err_unprepare_clocks:
 
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
-	struct rk_iommu *iommu = platform_get_drvdata(pdev);
+	pm_runtime_force_suspend(&pdev->dev);
+}
 
-	/*
-	 * Be careful not to try to shutdown an otherwise unused
-	 * IOMMU, as it is likely not to be clocked, and accessing it
-	 * would just block. An IOMMU without a domain is likely to be
-	 * unused, so let's use this as a (weak) guard.
-	 */
-	if (iommu && iommu->domain) {
-		rk_iommu_enable_stall(iommu);
-		rk_iommu_disable_paging(iommu);
-		rk_iommu_force_reset(iommu);
-	}
+static int __maybe_unused rk_iommu_suspend(struct device *dev)
+{
+	struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+	if (!iommu->domain)
+		return 0;
+
+	rk_iommu_disable(iommu);
+	return 0;
+}
+
+static int __maybe_unused rk_iommu_resume(struct device *dev)
+{
+	struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+	if (!iommu->domain)
+		return 0;
+
+	return rk_iommu_enable(iommu);
 }
 
+static const struct dev_pm_ops rk_iommu_pm_ops = {
+	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+};
+
 static const struct of_device_id rk_iommu_dt_ids[] = {
 	{ .compatible = "rockchip,iommu" },
 	{ /* sentinel */ }
@@ -1172,6 +1248,7 @@ static struct platform_driver rk_iommu_driver = {
 	.driver = {
 		.name = "rk_iommu",
 		.of_match_table = rk_iommu_dt_ids,
+		.pm = &rk_iommu_pm_ops,
 		.suppress_bind_attrs = true,
 	},
 };