author    Rob Clark <robdclark@gmail.com>    2014-07-09 22:08:15 -0400
committer Rob Clark <robdclark@gmail.com>    2014-08-04 11:55:29 -0400
commit    944fc36c31ed685cf8d3d125eb681ae7198f06fc (patch)
tree      d5f05d582f1c18e9325ed2fd1b591f036d1edea4
parent    1c4997fe4157c4d715cead67fca1a5085991ac0f (diff)
drm/msm: use upstream iommu
Downstream kernel IOMMU had a non-standard way of dealing with multiple
devices and multiple ports/contexts.  We don't need that on upstream
kernel, so rip out the crazy.

Note that we have to move the pinning of the ringbuffer to after the
IOMMU is attached.  No idea how that managed to work properly on the
downstream kernel.

For now, I am leaving the IOMMU port name stuff in place, to simplify
things for folks trying to backport latest drm/msm to device kernels.
Once we no longer have to care about pre-DT kernels, we can drop this
and instead backport the upstream IOMMU driver.

Signed-off-by: Rob Clark <robdclark@gmail.com>
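In short: with the upstream IOMMU the driver attaches its single domain directly to the GPU's struct device, and the ringbuffer is only pinned once that attach has happened (in adreno_hw_init() instead of msm_gpu_init()). A minimal sketch of the resulting attach path, condensed from the msm_iommu.c hunk below:

	/* Sketch, condensed from the diff below: one domain, one device,
	 * no per-context name lookup.  mmu->dev is now a plain struct
	 * device (the GPU's platform device), not a drm_device. */
	static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
	{
		struct msm_iommu *iommu = to_msm_iommu(mmu);

		return iommu_attach_device(iommu->domain, mmu->dev);
	}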
-rw-r--r--  drivers/gpu/drm/msm/Kconfig              |  1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  |  8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c  |  2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c  |  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c            |  9
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c          | 31
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h            |  8
7 files changed, 18 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f12388967856..c99c50de3226 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
 config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
-	depends on MSM_IOMMU
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	select DRM_KMS_HELPER
 	select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd8b09e..76c1df73e747 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
 
 	DBG("%s", gpu->name);
 
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	if (ret) {
+		gpu->rb_iova = 0;
+		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+		return ret;
+	}
+
 	/* Setup REG_CP_RB_CNTL: */
 	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
 			/* size is log2(quad-words): */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index a9579ce2ffb9..733646c0d3f8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -361,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 33b826dc493d..17175b5035b3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -320,7 +320,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..915240b4b80a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,7 +606,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(drm, iommu);
+		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
@@ -621,13 +621,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
-	if (ret) {
-		gpu->rb_iova = 0;
-		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
-		goto fail;
-	}
-
 	bs_init(gpu);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad9181edf..099af483fdf0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
 {
-	struct drm_device *dev = mmu->dev;
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i, ret;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx)) {
-			dev_warn(dev->dev, "couldn't get %s context", names[i]);
-			continue;
-		}
-		ret = iommu_attach_device(iommu->domain, ctx);
-		if (ret) {
-			dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-			return ret;
-		}
-	}
-
-	return 0;
+	return iommu_attach_device(iommu->domain, mmu->dev);
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx))
-			continue;
-		iommu_detach_device(iommu->domain, ctx);
-	}
+	iommu_detach_device(iommu->domain, mmu->dev);
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
 	.destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
 	struct msm_iommu *iommu;
 
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d154f71..7cd88d9dc155 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
 
 struct msm_mmu {
 	const struct msm_mmu_funcs *funcs;
-	struct drm_device *dev;
+	struct device *dev;
 };
 
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 		const struct msm_mmu_funcs *funcs)
 {
 	mmu->dev = dev;
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 #endif /* __MSM_MMU_H__ */