author     Dave Airlie <airlied@redhat.com>  2018-10-08 02:45:56 -0400
committer  Dave Airlie <airlied@redhat.com>  2018-10-10 02:49:07 -0400
commit     d995052cade4f4e2700f6e2045cd5db400986b17 (patch)
tree       b625aee2515ac748e2df3240c7c33e0cd9bf52b4 /drivers/gpu
parent     6952e3a1dffcb931cf8625aa01642b9afac2af61 (diff)
parent     3ce36b4542b585ed0231b175aee31020b2f289c2 (diff)
Merge tag 'drm-msm-next-2018-10-07' of git://people.freedesktop.org/~robclark/linux into drm-next
This time mostly further refinement of dpu1+a6xx for sdm845 and
beyond.. and hurray for more negative diffstat :-)
- Misc cleanups and fixes
- GPU preemption optimization
- a6xx perf improvements and clock fixes (i.e. let's actually not run at
  minimum clks)
- a6xx devfreq/DCVS
- Lots of code cleanup across dpu (Bruce, Jeykumar, Sean)
- Fixed a few crashes on startup relating to dsi (Sean)
- Add cursor support (Sravanthi, Sean)
- Properly free mdss irq on destroy (Jordan)
- Use correct encoder_type when initializing, fixes crash on boot (Stephen)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsNevCzMiLuNW1EVN6gtP3JZSir6PfnWvnCavSZM+bUFQ@mail.gmail.com
Diffstat (limited to 'drivers/gpu')
60 files changed, 1903 insertions, 3240 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 261fa79d456d..19ab521d4c3a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -58,7 +58,6 @@ msm-y := \
 	disp/dpu1/dpu_formats.o \
 	disp/dpu1/dpu_hw_blk.o \
 	disp/dpu1/dpu_hw_catalog.o \
-	disp/dpu1/dpu_hw_cdm.o \
 	disp/dpu1/dpu_hw_ctl.o \
 	disp/dpu1/dpu_hw_interrupts.o \
 	disp/dpu1/dpu_hw_intf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 4bff0a740c7d..12b0ba270b5e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
 
 Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 645a19aef399..a89f7bb8b5cc 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
 
 Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 19565e87aa7b..858690f52854 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
 
 Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 182d37ff3794..b4944cc0e62f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
 
 Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 059ec7d394d0..d2127b1c4ece 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -132,14 +132,14 @@ reset_set(void *data, u64 val)
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
 			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
-		drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+		drm_gem_object_put(a5xx_gpu->pm4_bo);
 		a5xx_gpu->pm4_bo = NULL;
 	}
 
 	if (a5xx_gpu->pfp_bo) {
 		if (a5xx_gpu->pfp_iova)
 			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
-		drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+		drm_gem_object_put(a5xx_gpu->pfp_bo);
 		a5xx_gpu->pfp_bo = NULL;
 	}
 
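The drm_gem_object_unreference() calls here (and in the a5xx_gpu.c, a5xx_power.c and a5xx_preempt.c hunks below) are switched to the current drm_gem_object_put() name; the surrounding teardown order is unchanged. A minimal sketch of that pattern, assuming the msm driver's headers and its msm_gem_put_iova() helper — the put_fw_bo() wrapper itself is hypothetical, for illustration only:

#include <drm/drm_gem.h>

#include "msm_gem.h"
#include "msm_gpu.h"

/* Hypothetical helper showing the teardown order used above: drop the
 * GPU address-space mapping first (if one was created), then drop the
 * object reference, then clear the cached pointers so the error path
 * is safe to run more than once.
 */
static void put_fw_bo(struct msm_gpu *gpu, struct drm_gem_object **bo,
		      uint64_t *iova)
{
	if (!*bo)
		return;

	if (*iova)
		msm_gem_put_iova(*bo, gpu->aspace);

	drm_gem_object_put(*bo);
	*bo = NULL;
	*iova = 0;
}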
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ab1d9308c311..eabe9252ae1e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1234,7 +1234,7 @@ static void a5xx_crashdumper_free(struct msm_gpu *gpu,
 	msm_gem_put_iova(dumper->bo, gpu->aspace);
 	msm_gem_put_vaddr(dumper->bo);
 
-	drm_gem_object_unreference(dumper->bo);
+	drm_gem_object_put(dumper->bo);
 }
 
 static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1436,12 +1436,22 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
 	return a5xx_gpu->cur_ring;
 }
 
-static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
 {
-	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
-		REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+	u64 busy_cycles, busy_time;
 
-	return 0;
+	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+			REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+	busy_time = (busy_cycles - gpu->devfreq.busy_cycles);
+	do_div(busy_time, (clk_get_rate(gpu->core_clk) / 1000000));
+
+	gpu->devfreq.busy_cycles = busy_cycles;
+
+	if (WARN_ON(busy_time > ~0LU))
+		return ~0LU;
+
+	return (unsigned long)busy_time;
 }
 
 static const struct adreno_gpu_funcs funcs = {
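The a5xx_gpu_busy() rework above feeds devfreq: rather than returning the raw 64-bit RBBM busy-cycle counter, it converts the counter delta since the last poll into microseconds using the current core clock rate (the kernel uses do_div() for the 64-bit division; plain division is fine in ordinary C). A standalone sketch of that arithmetic, with made-up example numbers rather than driver state:

#include <stdint.h>
#include <stdio.h>

/* Busy time in microseconds = busy-cycle delta / (core clock in MHz). */
static unsigned long busy_time_us(uint64_t prev_cycles, uint64_t cur_cycles,
				  unsigned long core_clk_hz)
{
	uint64_t busy_time = cur_cycles - prev_cycles;

	busy_time /= core_clk_hz / 1000000;

	return (unsigned long)busy_time;
}

int main(void)
{
	/* e.g. 200M busy cycles at a 500 MHz core clock -> 400000 us busy */
	printf("%lu us\n", busy_time_us(1000000, 201000000, 500000000));
	return 0;
}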
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e9c0e56dbec0..7a41e1c147e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -323,7 +323,7 @@ err:
 	if (a5xx_gpu->gpmu_iova)
 		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
 	if (a5xx_gpu->gpmu_bo)
-		drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
+		drm_gem_object_put(a5xx_gpu->gpmu_bo);
 
 	a5xx_gpu->gpmu_bo = NULL;
 	a5xx_gpu->gpmu_iova = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 970c7963ae29..4c357ead1be6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -208,6 +208,13 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
 	int i;
 
+	/* Always come up on rb 0 */
+	a5xx_gpu->cur_ring = gpu->rb[0];
+
+	/* No preemption if we only have one ring */
+	if (gpu->nr_rings == 1)
+		return;
+
 	for (i = 0; i < gpu->nr_rings; i++) {
 		a5xx_gpu->preempt[i]->wptr = 0;
 		a5xx_gpu->preempt[i]->rptr = 0;
@@ -220,9 +227,6 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
 
 	/* Reset the preemption state */
 	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
-
-	/* Always come up on rb 0 */
-	a5xx_gpu->cur_ring = gpu->rb[0];
 }
 
 static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
@@ -272,7 +276,7 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
 		if (a5xx_gpu->preempt_iova[i])
 			msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
 
-		drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
+		drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
 		a5xx_gpu->preempt_bo[i] = NULL;
 	}
 }
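Taken together, the first two hunks above reorder a5xx_preempt_hw_init(): the current ring is now chosen before the per-ring preemption records are touched, and the function returns early on single-ring configurations so cur_ring is still set when preemption is effectively disabled. A condensed sketch of the resulting shape (field names follow the diff; the real function also initializes further record fields and programs the hardware, so this is not a drop-in replacement):

/* Condensed sketch of the post-patch flow, not the full function. */
static void preempt_hw_init_sketch(struct msm_gpu *gpu, struct a5xx_gpu *a5xx_gpu)
{
	int i;

	/* Always come up on rb 0, whether or not preemption is in use */
	a5xx_gpu->cur_ring = gpu->rb[0];

	/* Nothing else to program when there is only one ring */
	if (gpu->nr_rings == 1)
		return;

	for (i = 0; i < gpu->nr_rings; i++) {
		/* reset each ring's saved preemption record (wptr/rptr, ...) */
		a5xx_gpu->preempt[i]->wptr = 0;
		a5xx_gpu->preempt[i]->rptr = 0;
	}

	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}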
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 87eab51f7000..a6f7c40454a6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
 
 Copyright (C) 2013-2018 by the following authors:
@@ -268,8 +268,687 @@ enum a6xx_depth_format { | |||
268 | DEPTH6_32 = 4, | 268 | DEPTH6_32 = 4, |
269 | }; | 269 | }; |
270 | 270 | ||
271 | enum a6xx_shader_id { | ||
272 | A6XX_TP0_TMO_DATA = 9, | ||
273 | A6XX_TP0_SMO_DATA = 10, | ||
274 | A6XX_TP0_MIPMAP_BASE_DATA = 11, | ||
275 | A6XX_TP1_TMO_DATA = 25, | ||
276 | A6XX_TP1_SMO_DATA = 26, | ||
277 | A6XX_TP1_MIPMAP_BASE_DATA = 27, | ||
278 | A6XX_SP_INST_DATA = 41, | ||
279 | A6XX_SP_LB_0_DATA = 42, | ||
280 | A6XX_SP_LB_1_DATA = 43, | ||
281 | A6XX_SP_LB_2_DATA = 44, | ||
282 | A6XX_SP_LB_3_DATA = 45, | ||
283 | A6XX_SP_LB_4_DATA = 46, | ||
284 | A6XX_SP_LB_5_DATA = 47, | ||
285 | A6XX_SP_CB_BINDLESS_DATA = 48, | ||
286 | A6XX_SP_CB_LEGACY_DATA = 49, | ||
287 | A6XX_SP_UAV_DATA = 50, | ||
288 | A6XX_SP_INST_TAG = 51, | ||
289 | A6XX_SP_CB_BINDLESS_TAG = 52, | ||
290 | A6XX_SP_TMO_UMO_TAG = 53, | ||
291 | A6XX_SP_SMO_TAG = 54, | ||
292 | A6XX_SP_STATE_DATA = 55, | ||
293 | A6XX_HLSQ_CHUNK_CVS_RAM = 73, | ||
294 | A6XX_HLSQ_CHUNK_CPS_RAM = 74, | ||
295 | A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75, | ||
296 | A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76, | ||
297 | A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77, | ||
298 | A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78, | ||
299 | A6XX_HLSQ_CVS_MISC_RAM = 80, | ||
300 | A6XX_HLSQ_CPS_MISC_RAM = 81, | ||
301 | A6XX_HLSQ_INST_RAM = 82, | ||
302 | A6XX_HLSQ_GFX_CVS_CONST_RAM = 83, | ||
303 | A6XX_HLSQ_GFX_CPS_CONST_RAM = 84, | ||
304 | A6XX_HLSQ_CVS_MISC_RAM_TAG = 85, | ||
305 | A6XX_HLSQ_CPS_MISC_RAM_TAG = 86, | ||
306 | A6XX_HLSQ_INST_RAM_TAG = 87, | ||
307 | A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88, | ||
308 | A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89, | ||
309 | A6XX_HLSQ_PWR_REST_RAM = 90, | ||
310 | A6XX_HLSQ_PWR_REST_TAG = 91, | ||
311 | A6XX_HLSQ_DATAPATH_META = 96, | ||
312 | A6XX_HLSQ_FRONTEND_META = 97, | ||
313 | A6XX_HLSQ_INDIRECT_META = 98, | ||
314 | A6XX_HLSQ_BACKEND_META = 99, | ||
315 | }; | ||
316 | |||
317 | enum a6xx_debugbus_id { | ||
318 | A6XX_DBGBUS_CP = 1, | ||
319 | A6XX_DBGBUS_RBBM = 2, | ||
320 | A6XX_DBGBUS_VBIF = 3, | ||
321 | A6XX_DBGBUS_HLSQ = 4, | ||
322 | A6XX_DBGBUS_UCHE = 5, | ||
323 | A6XX_DBGBUS_DPM = 6, | ||
324 | A6XX_DBGBUS_TESS = 7, | ||
325 | A6XX_DBGBUS_PC = 8, | ||
326 | A6XX_DBGBUS_VFDP = 9, | ||
327 | A6XX_DBGBUS_VPC = 10, | ||
328 | A6XX_DBGBUS_TSE = 11, | ||
329 | A6XX_DBGBUS_RAS = 12, | ||
330 | A6XX_DBGBUS_VSC = 13, | ||
331 | A6XX_DBGBUS_COM = 14, | ||
332 | A6XX_DBGBUS_LRZ = 16, | ||
333 | A6XX_DBGBUS_A2D = 17, | ||
334 | A6XX_DBGBUS_CCUFCHE = 18, | ||
335 | A6XX_DBGBUS_GMU_CX = 19, | ||
336 | A6XX_DBGBUS_RBP = 20, | ||
337 | A6XX_DBGBUS_DCS = 21, | ||
338 | A6XX_DBGBUS_DBGC = 22, | ||
339 | A6XX_DBGBUS_CX = 23, | ||
340 | A6XX_DBGBUS_GMU_GX = 24, | ||
341 | A6XX_DBGBUS_TPFCHE = 25, | ||
342 | A6XX_DBGBUS_GBIF_GX = 26, | ||
343 | A6XX_DBGBUS_GPC = 29, | ||
344 | A6XX_DBGBUS_LARC = 30, | ||
345 | A6XX_DBGBUS_HLSQ_SPTP = 31, | ||
346 | A6XX_DBGBUS_RB_0 = 32, | ||
347 | A6XX_DBGBUS_RB_1 = 33, | ||
348 | A6XX_DBGBUS_UCHE_WRAPPER = 36, | ||
349 | A6XX_DBGBUS_CCU_0 = 40, | ||
350 | A6XX_DBGBUS_CCU_1 = 41, | ||
351 | A6XX_DBGBUS_VFD_0 = 56, | ||
352 | A6XX_DBGBUS_VFD_1 = 57, | ||
353 | A6XX_DBGBUS_VFD_2 = 58, | ||
354 | A6XX_DBGBUS_VFD_3 = 59, | ||
355 | A6XX_DBGBUS_SP_0 = 64, | ||
356 | A6XX_DBGBUS_SP_1 = 65, | ||
357 | A6XX_DBGBUS_TPL1_0 = 72, | ||
358 | A6XX_DBGBUS_TPL1_1 = 73, | ||
359 | A6XX_DBGBUS_TPL1_2 = 74, | ||
360 | A6XX_DBGBUS_TPL1_3 = 75, | ||
361 | }; | ||
362 | |||
271 | enum a6xx_cp_perfcounter_select { | 363 | enum a6xx_cp_perfcounter_select { |
272 | PERF_CP_ALWAYS_COUNT = 0, | 364 | PERF_CP_ALWAYS_COUNT = 0, |
365 | PERF_CP_BUSY_GFX_CORE_IDLE = 1, | ||
366 | PERF_CP_BUSY_CYCLES = 2, | ||
367 | PERF_CP_NUM_PREEMPTIONS = 3, | ||
368 | PERF_CP_PREEMPTION_REACTION_DELAY = 4, | ||
369 | PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5, | ||
370 | PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6, | ||
371 | PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7, | ||
372 | PERF_CP_PREDICATED_DRAWS_KILLED = 8, | ||
373 | PERF_CP_MODE_SWITCH = 9, | ||
374 | PERF_CP_ZPASS_DONE = 10, | ||
375 | PERF_CP_CONTEXT_DONE = 11, | ||
376 | PERF_CP_CACHE_FLUSH = 12, | ||
377 | PERF_CP_LONG_PREEMPTIONS = 13, | ||
378 | PERF_CP_SQE_I_CACHE_STARVE = 14, | ||
379 | PERF_CP_SQE_IDLE = 15, | ||
380 | PERF_CP_SQE_PM4_STARVE_RB_IB = 16, | ||
381 | PERF_CP_SQE_PM4_STARVE_SDS = 17, | ||
382 | PERF_CP_SQE_MRB_STARVE = 18, | ||
383 | PERF_CP_SQE_RRB_STARVE = 19, | ||
384 | PERF_CP_SQE_VSD_STARVE = 20, | ||
385 | PERF_CP_VSD_DECODE_STARVE = 21, | ||
386 | PERF_CP_SQE_PIPE_OUT_STALL = 22, | ||
387 | PERF_CP_SQE_SYNC_STALL = 23, | ||
388 | PERF_CP_SQE_PM4_WFI_STALL = 24, | ||
389 | PERF_CP_SQE_SYS_WFI_STALL = 25, | ||
390 | PERF_CP_SQE_T4_EXEC = 26, | ||
391 | PERF_CP_SQE_LOAD_STATE_EXEC = 27, | ||
392 | PERF_CP_SQE_SAVE_SDS_STATE = 28, | ||
393 | PERF_CP_SQE_DRAW_EXEC = 29, | ||
394 | PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30, | ||
395 | PERF_CP_SQE_EXEC_PROFILED = 31, | ||
396 | PERF_CP_MEMORY_POOL_EMPTY = 32, | ||
397 | PERF_CP_MEMORY_POOL_SYNC_STALL = 33, | ||
398 | PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34, | ||
399 | PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35, | ||
400 | PERF_CP_AHB_STALL_SQE_GMU = 36, | ||
401 | PERF_CP_AHB_STALL_SQE_WR_OTHER = 37, | ||
402 | PERF_CP_AHB_STALL_SQE_RD_OTHER = 38, | ||
403 | PERF_CP_CLUSTER0_EMPTY = 39, | ||
404 | PERF_CP_CLUSTER1_EMPTY = 40, | ||
405 | PERF_CP_CLUSTER2_EMPTY = 41, | ||
406 | PERF_CP_CLUSTER3_EMPTY = 42, | ||
407 | PERF_CP_CLUSTER4_EMPTY = 43, | ||
408 | PERF_CP_CLUSTER5_EMPTY = 44, | ||
409 | PERF_CP_PM4_DATA = 45, | ||
410 | PERF_CP_PM4_HEADERS = 46, | ||
411 | PERF_CP_VBIF_READ_BEATS = 47, | ||
412 | PERF_CP_VBIF_WRITE_BEATS = 48, | ||
413 | PERF_CP_SQE_INSTR_COUNTER = 49, | ||
414 | }; | ||
415 | |||
416 | enum a6xx_rbbm_perfcounter_select { | ||
417 | PERF_RBBM_ALWAYS_COUNT = 0, | ||
418 | PERF_RBBM_ALWAYS_ON = 1, | ||
419 | PERF_RBBM_TSE_BUSY = 2, | ||
420 | PERF_RBBM_RAS_BUSY = 3, | ||
421 | PERF_RBBM_PC_DCALL_BUSY = 4, | ||
422 | PERF_RBBM_PC_VSD_BUSY = 5, | ||
423 | PERF_RBBM_STATUS_MASKED = 6, | ||
424 | PERF_RBBM_COM_BUSY = 7, | ||
425 | PERF_RBBM_DCOM_BUSY = 8, | ||
426 | PERF_RBBM_VBIF_BUSY = 9, | ||
427 | PERF_RBBM_VSC_BUSY = 10, | ||
428 | PERF_RBBM_TESS_BUSY = 11, | ||
429 | PERF_RBBM_UCHE_BUSY = 12, | ||
430 | PERF_RBBM_HLSQ_BUSY = 13, | ||
431 | }; | ||
432 | |||
433 | enum a6xx_pc_perfcounter_select { | ||
434 | PERF_PC_BUSY_CYCLES = 0, | ||
435 | PERF_PC_WORKING_CYCLES = 1, | ||
436 | PERF_PC_STALL_CYCLES_VFD = 2, | ||
437 | PERF_PC_STALL_CYCLES_TSE = 3, | ||
438 | PERF_PC_STALL_CYCLES_VPC = 4, | ||
439 | PERF_PC_STALL_CYCLES_UCHE = 5, | ||
440 | PERF_PC_STALL_CYCLES_TESS = 6, | ||
441 | PERF_PC_STALL_CYCLES_TSE_ONLY = 7, | ||
442 | PERF_PC_STALL_CYCLES_VPC_ONLY = 8, | ||
443 | PERF_PC_PASS1_TF_STALL_CYCLES = 9, | ||
444 | PERF_PC_STARVE_CYCLES_FOR_INDEX = 10, | ||
445 | PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11, | ||
446 | PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12, | ||
447 | PERF_PC_STARVE_CYCLES_FOR_POSITION = 13, | ||
448 | PERF_PC_STARVE_CYCLES_DI = 14, | ||
449 | PERF_PC_VIS_STREAMS_LOADED = 15, | ||
450 | PERF_PC_INSTANCES = 16, | ||
451 | PERF_PC_VPC_PRIMITIVES = 17, | ||
452 | PERF_PC_DEAD_PRIM = 18, | ||
453 | PERF_PC_LIVE_PRIM = 19, | ||
454 | PERF_PC_VERTEX_HITS = 20, | ||
455 | PERF_PC_IA_VERTICES = 21, | ||
456 | PERF_PC_IA_PRIMITIVES = 22, | ||
457 | PERF_PC_GS_PRIMITIVES = 23, | ||
458 | PERF_PC_HS_INVOCATIONS = 24, | ||
459 | PERF_PC_DS_INVOCATIONS = 25, | ||
460 | PERF_PC_VS_INVOCATIONS = 26, | ||
461 | PERF_PC_GS_INVOCATIONS = 27, | ||
462 | PERF_PC_DS_PRIMITIVES = 28, | ||
463 | PERF_PC_VPC_POS_DATA_TRANSACTION = 29, | ||
464 | PERF_PC_3D_DRAWCALLS = 30, | ||
465 | PERF_PC_2D_DRAWCALLS = 31, | ||
466 | PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32, | ||
467 | PERF_TESS_BUSY_CYCLES = 33, | ||
468 | PERF_TESS_WORKING_CYCLES = 34, | ||
469 | PERF_TESS_STALL_CYCLES_PC = 35, | ||
470 | PERF_TESS_STARVE_CYCLES_PC = 36, | ||
471 | PERF_PC_TSE_TRANSACTION = 37, | ||
472 | PERF_PC_TSE_VERTEX = 38, | ||
473 | PERF_PC_TESS_PC_UV_TRANS = 39, | ||
474 | PERF_PC_TESS_PC_UV_PATCHES = 40, | ||
475 | PERF_PC_TESS_FACTOR_TRANS = 41, | ||
476 | }; | ||
477 | |||
478 | enum a6xx_vfd_perfcounter_select { | ||
479 | PERF_VFD_BUSY_CYCLES = 0, | ||
480 | PERF_VFD_STALL_CYCLES_UCHE = 1, | ||
481 | PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2, | ||
482 | PERF_VFD_STALL_CYCLES_SP_INFO = 3, | ||
483 | PERF_VFD_STALL_CYCLES_SP_ATTR = 4, | ||
484 | PERF_VFD_STARVE_CYCLES_UCHE = 5, | ||
485 | PERF_VFD_RBUFFER_FULL = 6, | ||
486 | PERF_VFD_ATTR_INFO_FIFO_FULL = 7, | ||
487 | PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8, | ||
488 | PERF_VFD_NUM_ATTRIBUTES = 9, | ||
489 | PERF_VFD_UPPER_SHADER_FIBERS = 10, | ||
490 | PERF_VFD_LOWER_SHADER_FIBERS = 11, | ||
491 | PERF_VFD_MODE_0_FIBERS = 12, | ||
492 | PERF_VFD_MODE_1_FIBERS = 13, | ||
493 | PERF_VFD_MODE_2_FIBERS = 14, | ||
494 | PERF_VFD_MODE_3_FIBERS = 15, | ||
495 | PERF_VFD_MODE_4_FIBERS = 16, | ||
496 | PERF_VFD_TOTAL_VERTICES = 17, | ||
497 | PERF_VFDP_STALL_CYCLES_VFD = 18, | ||
498 | PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19, | ||
499 | PERF_VFDP_STALL_CYCLES_VFD_PROG = 20, | ||
500 | PERF_VFDP_STARVE_CYCLES_PC = 21, | ||
501 | PERF_VFDP_VS_STAGE_WAVES = 22, | ||
502 | }; | ||
503 | |||
504 | enum a6xx_hslq_perfcounter_select { | ||
505 | PERF_HLSQ_BUSY_CYCLES = 0, | ||
506 | PERF_HLSQ_STALL_CYCLES_UCHE = 1, | ||
507 | PERF_HLSQ_STALL_CYCLES_SP_STATE = 2, | ||
508 | PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3, | ||
509 | PERF_HLSQ_UCHE_LATENCY_CYCLES = 4, | ||
510 | PERF_HLSQ_UCHE_LATENCY_COUNT = 5, | ||
511 | PERF_HLSQ_FS_STAGE_1X_WAVES = 6, | ||
512 | PERF_HLSQ_FS_STAGE_2X_WAVES = 7, | ||
513 | PERF_HLSQ_QUADS = 8, | ||
514 | PERF_HLSQ_CS_INVOCATIONS = 9, | ||
515 | PERF_HLSQ_COMPUTE_DRAWCALLS = 10, | ||
516 | PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11, | ||
517 | PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12, | ||
518 | PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13, | ||
519 | PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14, | ||
520 | PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15, | ||
521 | PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16, | ||
522 | PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17, | ||
523 | PERF_HLSQ_STALL_CYCLES_VPC = 18, | ||
524 | PERF_HLSQ_PIXELS = 19, | ||
525 | PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20, | ||
526 | }; | ||
527 | |||
528 | enum a6xx_vpc_perfcounter_select { | ||
529 | PERF_VPC_BUSY_CYCLES = 0, | ||
530 | PERF_VPC_WORKING_CYCLES = 1, | ||
531 | PERF_VPC_STALL_CYCLES_UCHE = 2, | ||
532 | PERF_VPC_STALL_CYCLES_VFD_WACK = 3, | ||
533 | PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4, | ||
534 | PERF_VPC_STALL_CYCLES_PC = 5, | ||
535 | PERF_VPC_STALL_CYCLES_SP_LM = 6, | ||
536 | PERF_VPC_STARVE_CYCLES_SP = 7, | ||
537 | PERF_VPC_STARVE_CYCLES_LRZ = 8, | ||
538 | PERF_VPC_PC_PRIMITIVES = 9, | ||
539 | PERF_VPC_SP_COMPONENTS = 10, | ||
540 | PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11, | ||
541 | PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12, | ||
542 | PERF_VPC_RB_VISIBLE_PRIMITIVES = 13, | ||
543 | PERF_VPC_LM_TRANSACTION = 14, | ||
544 | PERF_VPC_STREAMOUT_TRANSACTION = 15, | ||
545 | PERF_VPC_VS_BUSY_CYCLES = 16, | ||
546 | PERF_VPC_PS_BUSY_CYCLES = 17, | ||
547 | PERF_VPC_VS_WORKING_CYCLES = 18, | ||
548 | PERF_VPC_PS_WORKING_CYCLES = 19, | ||
549 | PERF_VPC_STARVE_CYCLES_RB = 20, | ||
550 | PERF_VPC_NUM_VPCRAM_READ_POS = 21, | ||
551 | PERF_VPC_WIT_FULL_CYCLES = 22, | ||
552 | PERF_VPC_VPCRAM_FULL_CYCLES = 23, | ||
553 | PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24, | ||
554 | PERF_VPC_NUM_VPCRAM_WRITE = 25, | ||
555 | PERF_VPC_NUM_VPCRAM_READ_SO = 26, | ||
556 | PERF_VPC_NUM_ATTR_REQ_LM = 27, | ||
557 | }; | ||
558 | |||
559 | enum a6xx_tse_perfcounter_select { | ||
560 | PERF_TSE_BUSY_CYCLES = 0, | ||
561 | PERF_TSE_CLIPPING_CYCLES = 1, | ||
562 | PERF_TSE_STALL_CYCLES_RAS = 2, | ||
563 | PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3, | ||
564 | PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4, | ||
565 | PERF_TSE_STARVE_CYCLES_PC = 5, | ||
566 | PERF_TSE_INPUT_PRIM = 6, | ||
567 | PERF_TSE_INPUT_NULL_PRIM = 7, | ||
568 | PERF_TSE_TRIVAL_REJ_PRIM = 8, | ||
569 | PERF_TSE_CLIPPED_PRIM = 9, | ||
570 | PERF_TSE_ZERO_AREA_PRIM = 10, | ||
571 | PERF_TSE_FACENESS_CULLED_PRIM = 11, | ||
572 | PERF_TSE_ZERO_PIXEL_PRIM = 12, | ||
573 | PERF_TSE_OUTPUT_NULL_PRIM = 13, | ||
574 | PERF_TSE_OUTPUT_VISIBLE_PRIM = 14, | ||
575 | PERF_TSE_CINVOCATION = 15, | ||
576 | PERF_TSE_CPRIMITIVES = 16, | ||
577 | PERF_TSE_2D_INPUT_PRIM = 17, | ||
578 | PERF_TSE_2D_ALIVE_CYCLES = 18, | ||
579 | PERF_TSE_CLIP_PLANES = 19, | ||
580 | }; | ||
581 | |||
582 | enum a6xx_ras_perfcounter_select { | ||
583 | PERF_RAS_BUSY_CYCLES = 0, | ||
584 | PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1, | ||
585 | PERF_RAS_STALL_CYCLES_LRZ = 2, | ||
586 | PERF_RAS_STARVE_CYCLES_TSE = 3, | ||
587 | PERF_RAS_SUPER_TILES = 4, | ||
588 | PERF_RAS_8X4_TILES = 5, | ||
589 | PERF_RAS_MASKGEN_ACTIVE = 6, | ||
590 | PERF_RAS_FULLY_COVERED_SUPER_TILES = 7, | ||
591 | PERF_RAS_FULLY_COVERED_8X4_TILES = 8, | ||
592 | PERF_RAS_PRIM_KILLED_INVISILBE = 9, | ||
593 | PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10, | ||
594 | PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11, | ||
595 | PERF_RAS_BLOCKS = 12, | ||
596 | }; | ||
597 | |||
598 | enum a6xx_uche_perfcounter_select { | ||
599 | PERF_UCHE_BUSY_CYCLES = 0, | ||
600 | PERF_UCHE_STALL_CYCLES_ARBITER = 1, | ||
601 | PERF_UCHE_VBIF_LATENCY_CYCLES = 2, | ||
602 | PERF_UCHE_VBIF_LATENCY_SAMPLES = 3, | ||
603 | PERF_UCHE_VBIF_READ_BEATS_TP = 4, | ||
604 | PERF_UCHE_VBIF_READ_BEATS_VFD = 5, | ||
605 | PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6, | ||
606 | PERF_UCHE_VBIF_READ_BEATS_LRZ = 7, | ||
607 | PERF_UCHE_VBIF_READ_BEATS_SP = 8, | ||
608 | PERF_UCHE_READ_REQUESTS_TP = 9, | ||
609 | PERF_UCHE_READ_REQUESTS_VFD = 10, | ||
610 | PERF_UCHE_READ_REQUESTS_HLSQ = 11, | ||
611 | PERF_UCHE_READ_REQUESTS_LRZ = 12, | ||
612 | PERF_UCHE_READ_REQUESTS_SP = 13, | ||
613 | PERF_UCHE_WRITE_REQUESTS_LRZ = 14, | ||
614 | PERF_UCHE_WRITE_REQUESTS_SP = 15, | ||
615 | PERF_UCHE_WRITE_REQUESTS_VPC = 16, | ||
616 | PERF_UCHE_WRITE_REQUESTS_VSC = 17, | ||
617 | PERF_UCHE_EVICTS = 18, | ||
618 | PERF_UCHE_BANK_REQ0 = 19, | ||
619 | PERF_UCHE_BANK_REQ1 = 20, | ||
620 | PERF_UCHE_BANK_REQ2 = 21, | ||
621 | PERF_UCHE_BANK_REQ3 = 22, | ||
622 | PERF_UCHE_BANK_REQ4 = 23, | ||
623 | PERF_UCHE_BANK_REQ5 = 24, | ||
624 | PERF_UCHE_BANK_REQ6 = 25, | ||
625 | PERF_UCHE_BANK_REQ7 = 26, | ||
626 | PERF_UCHE_VBIF_READ_BEATS_CH0 = 27, | ||
627 | PERF_UCHE_VBIF_READ_BEATS_CH1 = 28, | ||
628 | PERF_UCHE_GMEM_READ_BEATS = 29, | ||
629 | PERF_UCHE_TPH_REF_FULL = 30, | ||
630 | PERF_UCHE_TPH_VICTIM_FULL = 31, | ||
631 | PERF_UCHE_TPH_EXT_FULL = 32, | ||
632 | PERF_UCHE_VBIF_STALL_WRITE_DATA = 33, | ||
633 | PERF_UCHE_DCMP_LATENCY_SAMPLES = 34, | ||
634 | PERF_UCHE_DCMP_LATENCY_CYCLES = 35, | ||
635 | PERF_UCHE_VBIF_READ_BEATS_PC = 36, | ||
636 | PERF_UCHE_READ_REQUESTS_PC = 37, | ||
637 | PERF_UCHE_RAM_READ_REQ = 38, | ||
638 | PERF_UCHE_RAM_WRITE_REQ = 39, | ||
639 | }; | ||
640 | |||
641 | enum a6xx_tp_perfcounter_select { | ||
642 | PERF_TP_BUSY_CYCLES = 0, | ||
643 | PERF_TP_STALL_CYCLES_UCHE = 1, | ||
644 | PERF_TP_LATENCY_CYCLES = 2, | ||
645 | PERF_TP_LATENCY_TRANS = 3, | ||
646 | PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4, | ||
647 | PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5, | ||
648 | PERF_TP_L1_CACHELINE_REQUESTS = 6, | ||
649 | PERF_TP_L1_CACHELINE_MISSES = 7, | ||
650 | PERF_TP_SP_TP_TRANS = 8, | ||
651 | PERF_TP_TP_SP_TRANS = 9, | ||
652 | PERF_TP_OUTPUT_PIXELS = 10, | ||
653 | PERF_TP_FILTER_WORKLOAD_16BIT = 11, | ||
654 | PERF_TP_FILTER_WORKLOAD_32BIT = 12, | ||
655 | PERF_TP_QUADS_RECEIVED = 13, | ||
656 | PERF_TP_QUADS_OFFSET = 14, | ||
657 | PERF_TP_QUADS_SHADOW = 15, | ||
658 | PERF_TP_QUADS_ARRAY = 16, | ||
659 | PERF_TP_QUADS_GRADIENT = 17, | ||
660 | PERF_TP_QUADS_1D = 18, | ||
661 | PERF_TP_QUADS_2D = 19, | ||
662 | PERF_TP_QUADS_BUFFER = 20, | ||
663 | PERF_TP_QUADS_3D = 21, | ||
664 | PERF_TP_QUADS_CUBE = 22, | ||
665 | PERF_TP_DIVERGENT_QUADS_RECEIVED = 23, | ||
666 | PERF_TP_PRT_NON_RESIDENT_EVENTS = 24, | ||
667 | PERF_TP_OUTPUT_PIXELS_POINT = 25, | ||
668 | PERF_TP_OUTPUT_PIXELS_BILINEAR = 26, | ||
669 | PERF_TP_OUTPUT_PIXELS_MIP = 27, | ||
670 | PERF_TP_OUTPUT_PIXELS_ANISO = 28, | ||
671 | PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29, | ||
672 | PERF_TP_FLAG_CACHE_REQUESTS = 30, | ||
673 | PERF_TP_FLAG_CACHE_MISSES = 31, | ||
674 | PERF_TP_L1_5_L2_REQUESTS = 32, | ||
675 | PERF_TP_2D_OUTPUT_PIXELS = 33, | ||
676 | PERF_TP_2D_OUTPUT_PIXELS_POINT = 34, | ||
677 | PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35, | ||
678 | PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36, | ||
679 | PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37, | ||
680 | PERF_TP_TPA2TPC_TRANS = 38, | ||
681 | PERF_TP_L1_MISSES_ASTC_1TILE = 39, | ||
682 | PERF_TP_L1_MISSES_ASTC_2TILE = 40, | ||
683 | PERF_TP_L1_MISSES_ASTC_4TILE = 41, | ||
684 | PERF_TP_L1_5_L2_COMPRESS_REQS = 42, | ||
685 | PERF_TP_L1_5_L2_COMPRESS_MISS = 43, | ||
686 | PERF_TP_L1_BANK_CONFLICT = 44, | ||
687 | PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45, | ||
688 | PERF_TP_L1_5_MISS_LATENCY_TRANS = 46, | ||
689 | PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47, | ||
690 | PERF_TP_FRONTEND_WORKING_CYCLES = 48, | ||
691 | PERF_TP_L1_TAG_WORKING_CYCLES = 49, | ||
692 | PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50, | ||
693 | PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51, | ||
694 | PERF_TP_BACKEND_WORKING_CYCLES = 52, | ||
695 | PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53, | ||
696 | PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54, | ||
697 | PERF_TP_STARVE_CYCLES_SP = 55, | ||
698 | PERF_TP_STARVE_CYCLES_UCHE = 56, | ||
699 | }; | ||
700 | |||
701 | enum a6xx_sp_perfcounter_select { | ||
702 | PERF_SP_BUSY_CYCLES = 0, | ||
703 | PERF_SP_ALU_WORKING_CYCLES = 1, | ||
704 | PERF_SP_EFU_WORKING_CYCLES = 2, | ||
705 | PERF_SP_STALL_CYCLES_VPC = 3, | ||
706 | PERF_SP_STALL_CYCLES_TP = 4, | ||
707 | PERF_SP_STALL_CYCLES_UCHE = 5, | ||
708 | PERF_SP_STALL_CYCLES_RB = 6, | ||
709 | PERF_SP_NON_EXECUTION_CYCLES = 7, | ||
710 | PERF_SP_WAVE_CONTEXTS = 8, | ||
711 | PERF_SP_WAVE_CONTEXT_CYCLES = 9, | ||
712 | PERF_SP_FS_STAGE_WAVE_CYCLES = 10, | ||
713 | PERF_SP_FS_STAGE_WAVE_SAMPLES = 11, | ||
714 | PERF_SP_VS_STAGE_WAVE_CYCLES = 12, | ||
715 | PERF_SP_VS_STAGE_WAVE_SAMPLES = 13, | ||
716 | PERF_SP_FS_STAGE_DURATION_CYCLES = 14, | ||
717 | PERF_SP_VS_STAGE_DURATION_CYCLES = 15, | ||
718 | PERF_SP_WAVE_CTRL_CYCLES = 16, | ||
719 | PERF_SP_WAVE_LOAD_CYCLES = 17, | ||
720 | PERF_SP_WAVE_EMIT_CYCLES = 18, | ||
721 | PERF_SP_WAVE_NOP_CYCLES = 19, | ||
722 | PERF_SP_WAVE_WAIT_CYCLES = 20, | ||
723 | PERF_SP_WAVE_FETCH_CYCLES = 21, | ||
724 | PERF_SP_WAVE_IDLE_CYCLES = 22, | ||
725 | PERF_SP_WAVE_END_CYCLES = 23, | ||
726 | PERF_SP_WAVE_LONG_SYNC_CYCLES = 24, | ||
727 | PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25, | ||
728 | PERF_SP_WAVE_JOIN_CYCLES = 26, | ||
729 | PERF_SP_LM_LOAD_INSTRUCTIONS = 27, | ||
730 | PERF_SP_LM_STORE_INSTRUCTIONS = 28, | ||
731 | PERF_SP_LM_ATOMICS = 29, | ||
732 | PERF_SP_GM_LOAD_INSTRUCTIONS = 30, | ||
733 | PERF_SP_GM_STORE_INSTRUCTIONS = 31, | ||
734 | PERF_SP_GM_ATOMICS = 32, | ||
735 | PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33, | ||
736 | PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34, | ||
737 | PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35, | ||
738 | PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36, | ||
739 | PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37, | ||
740 | PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38, | ||
741 | PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39, | ||
742 | PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40, | ||
743 | PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41, | ||
744 | PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42, | ||
745 | PERF_SP_VS_INSTRUCTIONS = 43, | ||
746 | PERF_SP_FS_INSTRUCTIONS = 44, | ||
747 | PERF_SP_ADDR_LOCK_COUNT = 45, | ||
748 | PERF_SP_UCHE_READ_TRANS = 46, | ||
749 | PERF_SP_UCHE_WRITE_TRANS = 47, | ||
750 | PERF_SP_EXPORT_VPC_TRANS = 48, | ||
751 | PERF_SP_EXPORT_RB_TRANS = 49, | ||
752 | PERF_SP_PIXELS_KILLED = 50, | ||
753 | PERF_SP_ICL1_REQUESTS = 51, | ||
754 | PERF_SP_ICL1_MISSES = 52, | ||
755 | PERF_SP_HS_INSTRUCTIONS = 53, | ||
756 | PERF_SP_DS_INSTRUCTIONS = 54, | ||
757 | PERF_SP_GS_INSTRUCTIONS = 55, | ||
758 | PERF_SP_CS_INSTRUCTIONS = 56, | ||
759 | PERF_SP_GPR_READ = 57, | ||
760 | PERF_SP_GPR_WRITE = 58, | ||
761 | PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59, | ||
762 | PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60, | ||
763 | PERF_SP_LM_BANK_CONFLICTS = 61, | ||
764 | PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62, | ||
765 | PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63, | ||
766 | PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64, | ||
767 | PERF_SP_LM_WORKING_CYCLES = 65, | ||
768 | PERF_SP_DISPATCHER_WORKING_CYCLES = 66, | ||
769 | PERF_SP_SEQUENCER_WORKING_CYCLES = 67, | ||
770 | PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68, | ||
771 | PERF_SP_STARVE_CYCLES_HLSQ = 69, | ||
772 | PERF_SP_NON_EXECUTION_LS_CYCLES = 70, | ||
773 | PERF_SP_WORKING_EU = 71, | ||
774 | PERF_SP_ANY_EU_WORKING = 72, | ||
775 | PERF_SP_WORKING_EU_FS_STAGE = 73, | ||
776 | PERF_SP_ANY_EU_WORKING_FS_STAGE = 74, | ||
777 | PERF_SP_WORKING_EU_VS_STAGE = 75, | ||
778 | PERF_SP_ANY_EU_WORKING_VS_STAGE = 76, | ||
779 | PERF_SP_WORKING_EU_CS_STAGE = 77, | ||
780 | PERF_SP_ANY_EU_WORKING_CS_STAGE = 78, | ||
781 | PERF_SP_GPR_READ_PREFETCH = 79, | ||
782 | PERF_SP_GPR_READ_CONFLICT = 80, | ||
783 | PERF_SP_GPR_WRITE_CONFLICT = 81, | ||
784 | PERF_SP_GM_LOAD_LATENCY_CYCLES = 82, | ||
785 | PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83, | ||
786 | PERF_SP_EXECUTABLE_WAVES = 84, | ||
787 | }; | ||
788 | |||
789 | enum a6xx_rb_perfcounter_select { | ||
790 | PERF_RB_BUSY_CYCLES = 0, | ||
791 | PERF_RB_STALL_CYCLES_HLSQ = 1, | ||
792 | PERF_RB_STALL_CYCLES_FIFO0_FULL = 2, | ||
793 | PERF_RB_STALL_CYCLES_FIFO1_FULL = 3, | ||
794 | PERF_RB_STALL_CYCLES_FIFO2_FULL = 4, | ||
795 | PERF_RB_STARVE_CYCLES_SP = 5, | ||
796 | PERF_RB_STARVE_CYCLES_LRZ_TILE = 6, | ||
797 | PERF_RB_STARVE_CYCLES_CCU = 7, | ||
798 | PERF_RB_STARVE_CYCLES_Z_PLANE = 8, | ||
799 | PERF_RB_STARVE_CYCLES_BARY_PLANE = 9, | ||
800 | PERF_RB_Z_WORKLOAD = 10, | ||
801 | PERF_RB_HLSQ_ACTIVE = 11, | ||
802 | PERF_RB_Z_READ = 12, | ||
803 | PERF_RB_Z_WRITE = 13, | ||
804 | PERF_RB_C_READ = 14, | ||
805 | PERF_RB_C_WRITE = 15, | ||
806 | PERF_RB_TOTAL_PASS = 16, | ||
807 | PERF_RB_Z_PASS = 17, | ||
808 | PERF_RB_Z_FAIL = 18, | ||
809 | PERF_RB_S_FAIL = 19, | ||
810 | PERF_RB_BLENDED_FXP_COMPONENTS = 20, | ||
811 | PERF_RB_BLENDED_FP16_COMPONENTS = 21, | ||
812 | PERF_RB_PS_INVOCATIONS = 22, | ||
813 | PERF_RB_2D_ALIVE_CYCLES = 23, | ||
814 | PERF_RB_2D_STALL_CYCLES_A2D = 24, | ||
815 | PERF_RB_2D_STARVE_CYCLES_SRC = 25, | ||
816 | PERF_RB_2D_STARVE_CYCLES_SP = 26, | ||
817 | PERF_RB_2D_STARVE_CYCLES_DST = 27, | ||
818 | PERF_RB_2D_VALID_PIXELS = 28, | ||
819 | PERF_RB_3D_PIXELS = 29, | ||
820 | PERF_RB_BLENDER_WORKING_CYCLES = 30, | ||
821 | PERF_RB_ZPROC_WORKING_CYCLES = 31, | ||
822 | PERF_RB_CPROC_WORKING_CYCLES = 32, | ||
823 | PERF_RB_SAMPLER_WORKING_CYCLES = 33, | ||
824 | PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34, | ||
825 | PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35, | ||
826 | PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36, | ||
827 | PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37, | ||
828 | PERF_RB_STALL_CYCLES_VPC = 38, | ||
829 | PERF_RB_2D_INPUT_TRANS = 39, | ||
830 | PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40, | ||
831 | PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41, | ||
832 | PERF_RB_BLENDED_FP32_COMPONENTS = 42, | ||
833 | PERF_RB_COLOR_PIX_TILES = 43, | ||
834 | PERF_RB_STALL_CYCLES_CCU = 44, | ||
835 | PERF_RB_EARLY_Z_ARB3_GRANT = 45, | ||
836 | PERF_RB_LATE_Z_ARB3_GRANT = 46, | ||
837 | PERF_RB_EARLY_Z_SKIP_GRANT = 47, | ||
838 | }; | ||
839 | |||
840 | enum a6xx_vsc_perfcounter_select { | ||
841 | PERF_VSC_BUSY_CYCLES = 0, | ||
842 | PERF_VSC_WORKING_CYCLES = 1, | ||
843 | PERF_VSC_STALL_CYCLES_UCHE = 2, | ||
844 | PERF_VSC_EOT_NUM = 3, | ||
845 | PERF_VSC_INPUT_TILES = 4, | ||
846 | }; | ||
847 | |||
848 | enum a6xx_ccu_perfcounter_select { | ||
849 | PERF_CCU_BUSY_CYCLES = 0, | ||
850 | PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1, | ||
851 | PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2, | ||
852 | PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3, | ||
853 | PERF_CCU_DEPTH_BLOCKS = 4, | ||
854 | PERF_CCU_COLOR_BLOCKS = 5, | ||
855 | PERF_CCU_DEPTH_BLOCK_HIT = 6, | ||
856 | PERF_CCU_COLOR_BLOCK_HIT = 7, | ||
857 | PERF_CCU_PARTIAL_BLOCK_READ = 8, | ||
858 | PERF_CCU_GMEM_READ = 9, | ||
859 | PERF_CCU_GMEM_WRITE = 10, | ||
860 | PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11, | ||
861 | PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12, | ||
862 | PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13, | ||
863 | PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14, | ||
864 | PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15, | ||
865 | PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16, | ||
866 | PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17, | ||
867 | PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18, | ||
868 | PERF_CCU_COLOR_READ_FLAG0_COUNT = 19, | ||
869 | PERF_CCU_COLOR_READ_FLAG1_COUNT = 20, | ||
870 | PERF_CCU_COLOR_READ_FLAG2_COUNT = 21, | ||
871 | PERF_CCU_COLOR_READ_FLAG3_COUNT = 22, | ||
872 | PERF_CCU_COLOR_READ_FLAG4_COUNT = 23, | ||
873 | PERF_CCU_COLOR_READ_FLAG5_COUNT = 24, | ||
874 | PERF_CCU_COLOR_READ_FLAG6_COUNT = 25, | ||
875 | PERF_CCU_COLOR_READ_FLAG8_COUNT = 26, | ||
876 | PERF_CCU_2D_RD_REQ = 27, | ||
877 | PERF_CCU_2D_WR_REQ = 28, | ||
878 | }; | ||
879 | |||
880 | enum a6xx_lrz_perfcounter_select { | ||
881 | PERF_LRZ_BUSY_CYCLES = 0, | ||
882 | PERF_LRZ_STARVE_CYCLES_RAS = 1, | ||
883 | PERF_LRZ_STALL_CYCLES_RB = 2, | ||
884 | PERF_LRZ_STALL_CYCLES_VSC = 3, | ||
885 | PERF_LRZ_STALL_CYCLES_VPC = 4, | ||
886 | PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5, | ||
887 | PERF_LRZ_STALL_CYCLES_UCHE = 6, | ||
888 | PERF_LRZ_LRZ_READ = 7, | ||
889 | PERF_LRZ_LRZ_WRITE = 8, | ||
890 | PERF_LRZ_READ_LATENCY = 9, | ||
891 | PERF_LRZ_MERGE_CACHE_UPDATING = 10, | ||
892 | PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11, | ||
893 | PERF_LRZ_PRIM_KILLED_BY_LRZ = 12, | ||
894 | PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13, | ||
895 | PERF_LRZ_FULL_8X8_TILES = 14, | ||
896 | PERF_LRZ_PARTIAL_8X8_TILES = 15, | ||
897 | PERF_LRZ_TILE_KILLED = 16, | ||
898 | PERF_LRZ_TOTAL_PIXEL = 17, | ||
899 | PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18, | ||
900 | PERF_LRZ_FULLY_COVERED_TILES = 19, | ||
901 | PERF_LRZ_PARTIAL_COVERED_TILES = 20, | ||
902 | PERF_LRZ_FEEDBACK_ACCEPT = 21, | ||
903 | PERF_LRZ_FEEDBACK_DISCARD = 22, | ||
904 | PERF_LRZ_FEEDBACK_STALL = 23, | ||
905 | PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24, | ||
906 | PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25, | ||
907 | PERF_LRZ_STALL_CYCLES_VC = 26, | ||
908 | PERF_LRZ_RAS_MASK_TRANS = 27, | ||
909 | }; | ||
910 | |||
911 | enum a6xx_cmp_perfcounter_select { | ||
912 | PERF_CMPDECMP_STALL_CYCLES_ARB = 0, | ||
913 | PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1, | ||
914 | PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2, | ||
915 | PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3, | ||
916 | PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4, | ||
917 | PERF_CMPDECMP_VBIF_READ_REQUEST = 5, | ||
918 | PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6, | ||
919 | PERF_CMPDECMP_VBIF_READ_DATA = 7, | ||
920 | PERF_CMPDECMP_VBIF_WRITE_DATA = 8, | ||
921 | PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9, | ||
922 | PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10, | ||
923 | PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11, | ||
924 | PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12, | ||
925 | PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13, | ||
926 | PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14, | ||
927 | PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15, | ||
928 | PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16, | ||
929 | PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17, | ||
930 | PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18, | ||
931 | PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19, | ||
932 | PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20, | ||
933 | PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21, | ||
934 | PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22, | ||
935 | PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23, | ||
936 | PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24, | ||
937 | PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25, | ||
938 | PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26, | ||
939 | PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27, | ||
940 | PERF_CMPDECMP_2D_RD_DATA = 28, | ||
941 | PERF_CMPDECMP_2D_WR_DATA = 29, | ||
942 | PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30, | ||
943 | PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31, | ||
944 | PERF_CMPDECMP_2D_OUTPUT_TRANS = 32, | ||
945 | PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33, | ||
946 | PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34, | ||
947 | PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35, | ||
948 | PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36, | ||
949 | PERF_CMPDECMP_2D_BUSY_CYCLES = 37, | ||
950 | PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38, | ||
951 | PERF_CMPDECMP_2D_PIXELS = 39, | ||
273 | }; | 952 | }; |
274 | 953 | ||
275 | enum a6xx_tex_filter { | 954 | enum a6xx_tex_filter { |
@@ -1765,12 +2444,39 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val) | |||
1765 | 2444 | ||
1766 | #define REG_A6XX_VBIF_VERSION 0x00003000 | 2445 | #define REG_A6XX_VBIF_VERSION 0x00003000 |
1767 | 2446 | ||
2447 | #define REG_A6XX_VBIF_CLKON 0x00003001 | ||
2448 | #define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002 | ||
2449 | |||
1768 | #define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a | 2450 | #define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a |
1769 | 2451 | ||
1770 | #define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080 | 2452 | #define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080 |
1771 | 2453 | ||
1772 | #define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081 | 2454 | #define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081 |
1773 | 2455 | ||
2456 | #define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084 | ||
2457 | |||
2458 | #define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085 | ||
2459 | |||
2460 | #define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086 | ||
2461 | #define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f | ||
2462 | #define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0 | ||
2463 | static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val) | ||
2464 | { | ||
2465 | return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK; | ||
2466 | } | ||
2467 | |||
2468 | #define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087 | ||
2469 | |||
2470 | #define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088 | ||
2471 | #define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff | ||
2472 | #define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0 | ||
2473 | static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val) | ||
2474 | { | ||
2475 | return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK; | ||
2476 | } | ||
2477 | |||
2478 | #define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c | ||
2479 | |||
1774 | #define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0 | 2480 | #define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0 |
1775 | 2481 | ||
1776 | #define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1 | 2482 | #define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1 |
@@ -1813,313 +2519,79 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val) | |||
1813 | 2519 | ||
1814 | #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a | 2520 | #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a |
1815 | 2521 | ||
1816 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400 | 2522 | #define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4 |
1817 | 2523 | #define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000 | |
1818 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401 | 2524 | #define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff |
1819 | 2525 | #define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0 | |
1820 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402 | 2526 | static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val) |
1821 | |||
1822 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403 | ||
1823 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff | ||
1824 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0 | ||
1825 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val) | ||
1826 | { | ||
1827 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK; | ||
1828 | } | ||
1829 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00 | ||
1830 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8 | ||
1831 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val) | ||
1832 | { | ||
1833 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK; | ||
1834 | } | ||
1835 | |||
1836 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404 | ||
1837 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f | ||
1838 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0 | ||
1839 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val) | ||
1840 | { | ||
1841 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK; | ||
1842 | } | ||
1843 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000 | ||
1844 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12 | ||
1845 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val) | ||
1846 | { | ||
1847 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK; | ||
1848 | } | ||
1849 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000 | ||
1850 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28 | ||
1851 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val) | ||
1852 | { | ||
1853 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK; | ||
1854 | } | ||
1855 | |||
1856 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405 | ||
1857 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000 | ||
1858 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24 | ||
1859 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val) | ||
1860 | { | ||
1861 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK; | ||
1862 | } | ||
1863 | |||
1864 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408 | ||
1865 | |||
1866 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409 | ||
1867 | |||
1868 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a | ||
1869 | |||
1870 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b | ||
1871 | |||
1872 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c | ||
1873 | |||
1874 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d | ||
1875 | |||
1876 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e | ||
1877 | |||
1878 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f | ||
1879 | |||
1880 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410 | ||
1881 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f | ||
1882 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0 | ||
1883 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val) | ||
1884 | { | ||
1885 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK; | ||
1886 | } | ||
1887 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0 | ||
1888 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4 | ||
1889 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val) | ||
1890 | { | ||
1891 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK; | ||
1892 | } | ||
1893 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00 | ||
1894 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8 | ||
1895 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val) | ||
1896 | { | ||
1897 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK; | ||
1898 | } | ||
1899 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000 | ||
1900 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12 | ||
1901 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val) | ||
1902 | { | ||
1903 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK; | ||
1904 | } | ||
1905 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000 | ||
1906 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16 | ||
1907 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val) | ||
1908 | { | ||
1909 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK; | ||
1910 | } | ||
1911 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000 | ||
1912 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20 | ||
1913 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val) | ||
1914 | { | ||
1915 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK; | ||
1916 | } | ||
1917 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000 | ||
1918 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24 | ||
1919 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val) | ||
1920 | { | ||
1921 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK; | ||
1922 | } | ||
1923 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000 | ||
1924 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28 | ||
1925 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val) | ||
1926 | { | ||
1927 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK; | ||
1928 | } | ||
1929 | |||
1930 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411 | ||
1931 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f | ||
1932 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0 | ||
1933 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val) | ||
1934 | { | ||
1935 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK; | ||
1936 | } | ||
1937 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0 | ||
1938 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4 | ||
1939 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val) | ||
1940 | { | ||
1941 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK; | ||
1942 | } | ||
1943 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00 | ||
1944 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8 | ||
1945 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val) | ||
1946 | { | ||
1947 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK; | ||
1948 | } | ||
1949 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000 | ||
1950 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12 | ||
1951 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val) | ||
1952 | { | ||
1953 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK; | ||
1954 | } | ||
1955 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000 | ||
1956 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16 | ||
1957 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val) | ||
1958 | { | ||
1959 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK; | ||
1960 | } | ||
1961 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000 | ||
1962 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20 | ||
1963 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val) | ||
1964 | { | ||
1965 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK; | ||
1966 | } | ||
1967 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000 | ||
1968 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24 | ||
1969 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val) | ||
1970 | { | ||
1971 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK; | ||
1972 | } | ||
1973 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000 | ||
1974 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28 | ||
1975 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val) | ||
1976 | { | ||
1977 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK; | ||
1978 | } | ||
1979 | |||
1980 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f | ||
1981 | |||
1982 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430 | ||
1983 | |||
1984 | #define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140 | ||
1985 | |||
1986 | #define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148 | ||
1987 | |||
1988 | #define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540 | ||
1989 | |||
1990 | #define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541 | ||
1991 | |||
1992 | #define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542 | ||
1993 | |||
1994 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543 | ||
1995 | |||
1996 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544 | ||
1997 | |||
1998 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545 | ||
1999 | |||
2000 | #define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572 | ||
2001 | |||
2002 | #define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573 | ||
2003 | |||
2004 | #define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574 | ||
2005 | |||
2006 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575 | ||
2007 | |||
2008 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576 | ||
2009 | |||
2010 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577 | ||
2011 | |||
2012 | #define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4 | ||
2013 | |||
2014 | #define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5 | ||
2015 | |||
2016 | #define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6 | ||
2017 | |||
2018 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7 | ||
2019 | |||
2020 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8 | ||
2021 | |||
2022 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9 | ||
2023 | |||
2024 | #define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6 | ||
2025 | |||
2026 | #define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7 | ||
2027 | |||
2028 | #define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8 | ||
2029 | |||
2030 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9 | ||
2031 | |||
2032 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da | ||
2033 | |||
2034 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db | ||
2035 | |||
2036 | #define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000 | ||
2037 | |||
2038 | #define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4 | ||
2039 | #define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 | ||
2040 | #define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff | ||
2041 | #define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0 | ||
2042 | static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val) | ||
2043 | { | ||
2044 | return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK; | ||
2045 | } | ||
2046 | #define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000 | ||
2047 | #define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16 | ||
2048 | static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val) | ||
2049 | { | ||
2050 | return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK; | ||
2051 | } | ||
2052 | |||
2053 | #define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1 | ||
2054 | #define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 | ||
2055 | #define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff | ||
2056 | #define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0 | ||
2057 | static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val) | ||
2058 | { | 2527 | { |
2059 | return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK; | 2528 | return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK; |
2060 | } | 2529 | } |
2061 | #define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000 | 2530 | #define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x7fff0000 |
2062 | #define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16 | 2531 | #define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16 |
2063 | static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val) | 2532 | static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val) |
2064 | { | 2533 | { |
2065 | return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK; | 2534 | return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK; |
2066 | } | 2535 | } |
2067 | 2536 | ||
2068 | #define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307 | 2537 | #define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1 |
2069 | #define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 | 2538 | #define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 |
2070 | #define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff | 2539 | #define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff |
2071 | #define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0 | 2540 | #define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0 |
2072 | static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val) | 2541 | static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val) |
2073 | { | 2542 | { |
2074 | return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK; | 2543 | return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK; |
2075 | } | 2544 | } |
2076 | #define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000 | 2545 | #define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x7fff0000 |
2077 | #define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16 | 2546 | #define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16 |
2078 | static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val) | 2547 | static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val) |
2079 | { | 2548 | { |
2080 | return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK; | 2549 | return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK; |
2081 | } | 2550 | } |
2082 | 2551 | ||
2083 | #define REG_A6XX_X1_BIN_SIZE 0x000080a1 | 2552 | #define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307 |
2084 | #define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff | 2553 | #define A6XX_SP_TP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 |
2085 | #define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0 | 2554 | #define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00007fff |
2086 | static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val) | 2555 | #define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0 |
2556 | static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val) | ||
2087 | { | 2557 | { |
2088 | return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK; | 2558 | return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK; |
2089 | } | 2559 | } |
2090 | #define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00 | 2560 | #define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x7fff0000 |
2091 | #define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8 | 2561 | #define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16 |
2092 | static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val) | 2562 | static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val) |
2093 | { | 2563 | { |
2094 | return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK; | 2564 | return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK; |
2095 | } | 2565 | } |
2096 | 2566 | ||
2097 | #define REG_A6XX_X2_BIN_SIZE 0x00008800 | 2567 | #define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1 |
2098 | #define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff | 2568 | #define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x000000ff |
2099 | #define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0 | 2569 | #define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0 |
2100 | static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val) | 2570 | static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val) |
2101 | { | 2571 | { |
2102 | return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK; | 2572 | return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK; |
2103 | } | 2573 | } |
2104 | #define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00 | 2574 | #define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x0001ff00 |
2105 | #define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8 | 2575 | #define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8 |
2106 | static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val) | 2576 | static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val) |
2107 | { | 2577 | { |
2108 | return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK; | 2578 | return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK; |
2109 | } | 2579 | } |
2580 | #define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000 | ||
2581 | #define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000 | ||
2110 | 2582 | ||
2111 | #define REG_A6XX_X3_BIN_SIZE 0x000088d3 | 2583 | #define REG_A6XX_RB_BIN_CONTROL2 0x000088d3 |
2112 | #define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff | 2584 | #define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x000000ff |
2113 | #define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0 | 2585 | #define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0 |
2114 | static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val) | 2586 | static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val) |
2115 | { | 2587 | { |
2116 | return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK; | 2588 | return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK; |
2117 | } | 2589 | } |
2118 | #define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00 | 2590 | #define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x0001ff00 |
2119 | #define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8 | 2591 | #define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8 |
2120 | static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val) | 2592 | static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val) |
2121 | { | 2593 | { |
2122 | return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK; | 2594 | return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK; |
2123 | } | 2595 | } |
2124 | 2596 | ||
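The BINW/BINH helpers in the renamed GRAS_BIN_CONTROL / RB_BIN_CONTROL2 registers deviate from the plain shift-and-mask pattern: they divide the pixel dimension down before shifting, so the register stores bin width in 32-pixel units and bin height in 16-pixel units. A minimal sketch with example dimensions, assuming the macros above are in scope:

```c
/* Illustrative only: a 96x80-pixel bin (example numbers). */
uint32_t bin = A6XX_GRAS_BIN_CONTROL_BINW(96) | /* 96 >> 5 == 3 -> bits 7:0  */
               A6XX_GRAS_BIN_CONTROL_BINH(80);  /* 80 >> 4 == 5 -> bits 16:8 */
/* bin == 0x00000503; widths that are not multiples of 32 (or heights not
 * multiples of 16) lose their low bits in the shift. */
```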
2125 | #define REG_A6XX_VSC_BIN_SIZE 0x00000c02 | 2597 | #define REG_A6XX_VSC_BIN_SIZE 0x00000c02 |
@@ -2182,11 +2654,19 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val) | |||
2182 | return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK; | 2654 | return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK; |
2183 | } | 2655 | } |
2184 | 2656 | ||
2185 | #define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30 | 2657 | #define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO 0x00000c30 |
2186 | 2658 | ||
2187 | #define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31 | 2659 | #define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI 0x00000c31 |
2188 | 2660 | ||
2189 | #define REG_A6XX_VSC_XXX_PITCH 0x00000c32 | 2661 | #define REG_A6XX_VSC_PIPE_DATA2_PITCH 0x00000c32 |
2662 | |||
2663 | #define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH 0x00000c33 | ||
2664 | #define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK 0xffffffff | ||
2665 | #define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT 0 | ||
2666 | static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val) | ||
2667 | { | ||
2668 | return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK; | ||
2669 | } | ||
2190 | 2670 | ||
2191 | #define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34 | 2671 | #define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34 |
2192 | 2672 | ||
@@ -2194,18 +2674,29 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val) | |||
2194 | 2674 | ||
2195 | #define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36 | 2675 | #define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36 |
2196 | 2676 | ||
2677 | #define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH 0x00000c37 | ||
2678 | #define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK 0xffffffff | ||
2679 | #define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT 0 | ||
2680 | static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val) | ||
2681 | { | ||
2682 | return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK; | ||
2683 | } | ||
2684 | |||
2197 | static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; } | 2685 | static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; } |
2198 | 2686 | ||
2199 | static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; } | 2687 | static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; } |
2200 | 2688 | ||
2201 | #define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12 | 2689 | #define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12 |
2202 | 2690 | ||
2691 | #define REG_A6XX_GRAS_UNKNOWN_8000 0x00008000 | ||
2692 | |||
2203 | #define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001 | 2693 | #define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001 |
2204 | 2694 | ||
2205 | #define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004 | 2695 | #define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004 |
2206 | 2696 | ||
2207 | #define REG_A6XX_GRAS_CNTL 0x00008005 | 2697 | #define REG_A6XX_GRAS_CNTL 0x00008005 |
2208 | #define A6XX_GRAS_CNTL_VARYING 0x00000001 | 2698 | #define A6XX_GRAS_CNTL_VARYING 0x00000001 |
2699 | #define A6XX_GRAS_CNTL_UNK3 0x00000008 | ||
2209 | #define A6XX_GRAS_CNTL_XCOORD 0x00000040 | 2700 | #define A6XX_GRAS_CNTL_XCOORD 0x00000040 |
2210 | #define A6XX_GRAS_CNTL_YCOORD 0x00000080 | 2701 | #define A6XX_GRAS_CNTL_YCOORD 0x00000080 |
2211 | #define A6XX_GRAS_CNTL_ZCOORD 0x00000100 | 2702 | #define A6XX_GRAS_CNTL_ZCOORD 0x00000100 |
@@ -2308,6 +2799,9 @@ static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val) | |||
2308 | return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK; | 2799 | return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK; |
2309 | } | 2800 | } |
2310 | 2801 | ||
2802 | #define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094 | ||
2803 | #define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001 | ||
2804 | |||
2311 | #define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095 | 2805 | #define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095 |
2312 | #define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff | 2806 | #define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff |
2313 | #define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0 | 2807 | #define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0 |
@@ -2344,6 +2838,8 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep | |||
2344 | 2838 | ||
2345 | #define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b | 2839 | #define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b |
2346 | 2840 | ||
2841 | #define REG_A6XX_GRAS_UNKNOWN_80A0 0x000080a0 | ||
2842 | |||
2347 | #define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2 | 2843 | #define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2 |
2348 | #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 | 2844 | #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 |
2349 | #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 | 2845 | #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 |
@@ -2464,6 +2960,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) | |||
2464 | #define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002 | 2960 | #define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002 |
2465 | #define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004 | 2961 | #define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004 |
2466 | 2962 | ||
2963 | #define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101 | ||
2964 | |||
2467 | #define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102 | 2965 | #define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102 |
2468 | #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff | 2966 | #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff |
2469 | #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0 | 2967 | #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0 |
@@ -2494,6 +2992,10 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) | |||
2494 | 2992 | ||
2495 | #define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107 | 2993 | #define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107 |
2496 | 2994 | ||
2995 | #define REG_A6XX_GRAS_UNKNOWN_8109 0x00008109 | ||
2996 | |||
2997 | #define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110 | ||
2998 | |||
2497 | #define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400 | 2999 | #define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400 |
2498 | 3000 | ||
2499 | #define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401 | 3001 | #define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401 |
@@ -2590,6 +3092,33 @@ static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val) | |||
2590 | 3092 | ||
2591 | #define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600 | 3093 | #define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600 |
2592 | 3094 | ||
3095 | #define REG_A6XX_RB_BIN_CONTROL 0x00008800 | ||
3096 | #define A6XX_RB_BIN_CONTROL_BINW__MASK 0x000000ff | ||
3097 | #define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0 | ||
3098 | static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val) | ||
3099 | { | ||
3100 | return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK; | ||
3101 | } | ||
3102 | #define A6XX_RB_BIN_CONTROL_BINH__MASK 0x0001ff00 | ||
3103 | #define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8 | ||
3104 | static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val) | ||
3105 | { | ||
3106 | return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK; | ||
3107 | } | ||
3108 | #define A6XX_RB_BIN_CONTROL_BINNING_PASS 0x00040000 | ||
3109 | #define A6XX_RB_BIN_CONTROL_USE_VIZ 0x00200000 | ||
3110 | |||
3111 | #define REG_A6XX_RB_RENDER_CNTL 0x00008801 | ||
3112 | #define A6XX_RB_RENDER_CNTL_UNK4 0x00000010 | ||
3113 | #define A6XX_RB_RENDER_CNTL_BINNING 0x00000080 | ||
3114 | #define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000 | ||
3115 | #define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000 | ||
3116 | #define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16 | ||
3117 | static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val) | ||
3118 | { | ||
3119 | return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK; | ||
3120 | } | ||
3121 | |||
2593 | #define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802 | 3122 | #define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802 |
2594 | #define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 | 3123 | #define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 |
2595 | #define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 | 3124 | #define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 |
@@ -2615,6 +3144,7 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val | |||
2615 | 3144 | ||
2616 | #define REG_A6XX_RB_RENDER_CONTROL0 0x00008809 | 3145 | #define REG_A6XX_RB_RENDER_CONTROL0 0x00008809 |
2617 | #define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001 | 3146 | #define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001 |
3147 | #define A6XX_RB_RENDER_CONTROL0_UNK3 0x00000008 | ||
2618 | #define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040 | 3148 | #define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040 |
2619 | #define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080 | 3149 | #define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080 |
2620 | #define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100 | 3150 | #define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100 |
@@ -2747,6 +3277,10 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe | |||
2747 | #define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040 | 3277 | #define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040 |
2748 | #define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080 | 3278 | #define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080 |
2749 | 3279 | ||
3280 | #define REG_A6XX_RB_UNKNOWN_8810 0x00008810 | ||
3281 | |||
3282 | #define REG_A6XX_RB_UNKNOWN_8811 0x00008811 | ||
3283 | |||
2750 | #define REG_A6XX_RB_UNKNOWN_8818 0x00008818 | 3284 | #define REG_A6XX_RB_UNKNOWN_8818 0x00008818 |
2751 | 3285 | ||
2752 | #define REG_A6XX_RB_UNKNOWN_8819 0x00008819 | 3286 | #define REG_A6XX_RB_UNKNOWN_8819 0x00008819 |
@@ -2837,7 +3371,6 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) | |||
2837 | { | 3371 | { |
2838 | return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; | 3372 | return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; |
2839 | } | 3373 | } |
2840 | #define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000 | ||
2841 | 3374 | ||
2842 | static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; } | 3375 | static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; } |
2843 | #define A6XX_RB_MRT_PITCH__MASK 0xffffffff | 3376 | #define A6XX_RB_MRT_PITCH__MASK 0xffffffff |
@@ -2923,6 +3456,9 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val) | |||
2923 | return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK; | 3456 | return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK; |
2924 | } | 3457 | } |
2925 | 3458 | ||
3459 | #define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870 | ||
3460 | #define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001 | ||
3461 | |||
2926 | #define REG_A6XX_RB_DEPTH_CNTL 0x00008871 | 3462 | #define REG_A6XX_RB_DEPTH_CNTL 0x00008871 |
2927 | #define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001 | 3463 | #define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001 |
2928 | #define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002 | 3464 | #define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002 |
@@ -3053,6 +3589,12 @@ static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val) | |||
3053 | { | 3589 | { |
3054 | return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK; | 3590 | return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK; |
3055 | } | 3591 | } |
3592 | #define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00 | ||
3593 | #define A6XX_RB_STENCILREF_BFREF__SHIFT 8 | ||
3594 | static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val) | ||
3595 | { | ||
3596 | return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK; | ||
3597 | } | ||
3056 | 3598 | ||
3057 | #define REG_A6XX_RB_STENCILMASK 0x00008888 | 3599 | #define REG_A6XX_RB_STENCILMASK 0x00008888 |
3058 | #define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff | 3600 | #define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff |
@@ -3061,6 +3603,12 @@ static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val) | |||
3061 | { | 3603 | { |
3062 | return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK; | 3604 | return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK; |
3063 | } | 3605 | } |
3606 | #define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00 | ||
3607 | #define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8 | ||
3608 | static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val) | ||
3609 | { | ||
3610 | return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK; | ||
3611 | } | ||
3064 | 3612 | ||
3065 | #define REG_A6XX_RB_STENCILWRMASK 0x00008889 | 3613 | #define REG_A6XX_RB_STENCILWRMASK 0x00008889 |
3066 | #define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff | 3614 | #define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff |
@@ -3069,6 +3617,12 @@ static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val) | |||
3069 | { | 3617 | { |
3070 | return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK; | 3618 | return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK; |
3071 | } | 3619 | } |
3620 | #define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00 | ||
3621 | #define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8 | ||
3622 | static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val) | ||
3623 | { | ||
3624 | return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK; | ||
3625 | } | ||
3072 | 3626 | ||
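The new BFREF/BFMASK/BFWRMASK fields place back-face stencil state in the upper byte of the same registers that hold the front-face values, so each register can be programmed for both faces in a single write. A minimal sketch with example values, assuming the macros above are in scope:

```c
/* Illustrative only: front-face reference 0x01, back-face reference 0xff. */
uint32_t stencilref = A6XX_RB_STENCILREF_REF(0x01) |  /* bits 7:0  */
                      A6XX_RB_STENCILREF_BFREF(0xff); /* bits 15:8 */
/* stencilref == 0x0000ff01 */
```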
3073 | #define REG_A6XX_RB_WINDOW_OFFSET 0x00008890 | 3627 | #define REG_A6XX_RB_WINDOW_OFFSET 0x00008890 |
3074 | #define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 | 3628 | #define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 |
@@ -3177,14 +3731,14 @@ static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val) | |||
3177 | 3731 | ||
3178 | #define REG_A6XX_RB_BLIT_INFO 0x000088e3 | 3732 | #define REG_A6XX_RB_BLIT_INFO 0x000088e3 |
3179 | #define A6XX_RB_BLIT_INFO_UNK0 0x00000001 | 3733 | #define A6XX_RB_BLIT_INFO_UNK0 0x00000001 |
3180 | #define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002 | 3734 | #define A6XX_RB_BLIT_INFO_GMEM 0x00000002 |
3181 | #define A6XX_RB_BLIT_INFO_INTEGER 0x00000004 | 3735 | #define A6XX_RB_BLIT_INFO_INTEGER 0x00000004 |
3182 | #define A6XX_RB_BLIT_INFO_UNK3 0x00000008 | 3736 | #define A6XX_RB_BLIT_INFO_DEPTH 0x00000008 |
3183 | #define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0 | 3737 | #define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0 |
3184 | #define A6XX_RB_BLIT_INFO_MASK__SHIFT 4 | 3738 | #define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4 |
3185 | static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val) | 3739 | static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val) |
3186 | { | 3740 | { |
3187 | return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK; | 3741 | return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK; |
3188 | } | 3742 | } |
3189 | 3743 | ||
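The renamed RB_BLIT_INFO bits read as a small command word for the blitter: the names suggest GMEM selects a GMEM-side operation, DEPTH marks a depth/stencil surface, and CLEAR_MASK picks which channels a clear touches. How the driver actually combines them is not shown in this hunk, so the following is purely an illustration of the encoding:

```c
/* Hypothetical example: a clear of all four channels on the GMEM side.
 * Whether the driver issues exactly this combination is not shown here. */
uint32_t info = A6XX_RB_BLIT_INFO_GMEM |           /* bit 1    */
                A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf); /* bits 7:4 */
/* info == 0x000000f2 */
```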
3190 | #define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0 | 3744 | #define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0 |
@@ -3274,12 +3828,16 @@ static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val) | |||
3274 | 3828 | ||
3275 | #define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01 | 3829 | #define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01 |
3276 | 3830 | ||
3831 | #define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04 | ||
3832 | |||
3277 | #define REG_A6XX_RB_CCU_CNTL 0x00008e07 | 3833 | #define REG_A6XX_RB_CCU_CNTL 0x00008e07 |
3278 | 3834 | ||
3279 | #define REG_A6XX_VPC_UNKNOWN_9101 0x00009101 | 3835 | #define REG_A6XX_VPC_UNKNOWN_9101 0x00009101 |
3280 | 3836 | ||
3281 | #define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104 | 3837 | #define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104 |
3282 | 3838 | ||
3839 | #define REG_A6XX_VPC_UNKNOWN_9107 0x00009107 | ||
3840 | |||
3283 | #define REG_A6XX_VPC_UNKNOWN_9108 0x00009108 | 3841 | #define REG_A6XX_VPC_UNKNOWN_9108 0x00009108 |
3284 | 3842 | ||
3285 | static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; } | 3843 | static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; } |
@@ -3385,6 +3943,9 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val) | |||
3385 | #define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200 | 3943 | #define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200 |
3386 | #define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000 | 3944 | #define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000 |
3387 | 3945 | ||
3946 | #define REG_A6XX_VPC_SO_OVERRIDE 0x00009306 | ||
3947 | #define A6XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001 | ||
3948 | |||
3388 | #define REG_A6XX_VPC_UNKNOWN_9600 0x00009600 | 3949 | #define REG_A6XX_VPC_UNKNOWN_9600 0x00009600 |
3389 | 3950 | ||
3390 | #define REG_A6XX_VPC_UNKNOWN_9602 0x00009602 | 3951 | #define REG_A6XX_VPC_UNKNOWN_9602 0x00009602 |
@@ -3397,8 +3958,14 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val) | |||
3397 | 3958 | ||
3398 | #define REG_A6XX_PC_UNKNOWN_9805 0x00009805 | 3959 | #define REG_A6XX_PC_UNKNOWN_9805 0x00009805 |
3399 | 3960 | ||
3961 | #define REG_A6XX_PC_UNKNOWN_9806 0x00009806 | ||
3962 | |||
3963 | #define REG_A6XX_PC_UNKNOWN_9980 0x00009980 | ||
3964 | |||
3400 | #define REG_A6XX_PC_UNKNOWN_9981 0x00009981 | 3965 | #define REG_A6XX_PC_UNKNOWN_9981 0x00009981 |
3401 | 3966 | ||
3967 | #define REG_A6XX_PC_UNKNOWN_9990 0x00009990 | ||
3968 | |||
3402 | #define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00 | 3969 | #define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00 |
3403 | #define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001 | 3970 | #define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001 |
3404 | #define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002 | 3971 | #define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002 |
@@ -3410,6 +3977,7 @@ static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val) | |||
3410 | { | 3977 | { |
3411 | return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK; | 3978 | return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK; |
3412 | } | 3979 | } |
3980 | #define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE 0x00000100 | ||
3413 | 3981 | ||
3414 | #define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06 | 3982 | #define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06 |
3415 | 3983 | ||
@@ -3488,6 +4056,8 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) | |||
3488 | 4056 | ||
3489 | #define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008 | 4057 | #define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008 |
3490 | 4058 | ||
4059 | #define REG_A6XX_VFD_UNKNOWN_A009 0x0000a009 | ||
4060 | |||
3491 | #define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e | 4061 | #define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e |
3492 | 4062 | ||
3493 | #define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f | 4063 | #define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f |
@@ -3640,6 +4210,8 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) | |||
3640 | #define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000 | 4210 | #define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000 |
3641 | #define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000 | 4211 | #define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000 |
3642 | 4212 | ||
4213 | #define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b | ||
4214 | |||
3643 | #define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c | 4215 | #define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c |
3644 | 4216 | ||
3645 | #define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d | 4217 | #define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d |
@@ -3884,6 +4456,8 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) | |||
3884 | #define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000 | 4456 | #define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000 |
3885 | #define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000 | 4457 | #define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000 |
3886 | 4458 | ||
4459 | #define REG_A6XX_SP_UNKNOWN_A982 0x0000a982 | ||
4460 | |||
3887 | #define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983 | 4461 | #define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983 |
3888 | 4462 | ||
3889 | #define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984 | 4463 | #define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984 |
@@ -3979,7 +4553,8 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val) | |||
3979 | } | 4553 | } |
3980 | #define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100 | 4554 | #define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100 |
3981 | #define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200 | 4555 | #define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200 |
3982 | #define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400 | 4556 | |
4557 | #define REG_A6XX_SP_UNKNOWN_A99E 0x0000a99e | ||
3983 | 4558 | ||
3984 | #define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7 | 4559 | #define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7 |
3985 | 4560 | ||
@@ -4066,14 +4641,20 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val) | |||
4066 | 4641 | ||
4067 | #define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05 | 4642 | #define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05 |
4068 | 4643 | ||
4644 | #define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20 | ||
4645 | |||
4069 | #define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00 | 4646 | #define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00 |
4070 | 4647 | ||
4648 | #define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03 | ||
4649 | |||
4071 | #define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04 | 4650 | #define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04 |
4072 | 4651 | ||
4073 | #define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f | 4652 | #define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f |
4074 | 4653 | ||
4075 | #define REG_A6XX_SP_UNKNOWN_B182 0x0000b182 | 4654 | #define REG_A6XX_SP_UNKNOWN_B182 0x0000b182 |
4076 | 4655 | ||
4656 | #define REG_A6XX_SP_UNKNOWN_B183 0x0000b183 | ||
4657 | |||
4077 | #define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300 | 4658 | #define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300 |
4078 | #define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 | 4659 | #define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 |
4079 | #define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 | 4660 | #define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 |
@@ -4097,6 +4678,8 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples | |||
4097 | 4678 | ||
4098 | #define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304 | 4679 | #define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304 |
4099 | 4680 | ||
4681 | #define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309 | ||
4682 | |||
4100 | #define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0 | 4683 | #define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0 |
4101 | #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff | 4684 | #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff |
4102 | #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 | 4685 | #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 |
@@ -4162,6 +4745,8 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val) | |||
4162 | return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK; | 4745 | return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK; |
4163 | } | 4746 | } |
4164 | 4747 | ||
4748 | #define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980 | ||
4749 | |||
4165 | #define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982 | 4750 | #define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982 |
4166 | 4751 | ||
4167 | #define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983 | 4752 | #define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983 |
@@ -4537,11 +5122,11 @@ static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val) | |||
4537 | } | 5122 | } |
4538 | 5123 | ||
4539 | #define REG_A6XX_TEX_CONST_8 0x00000008 | 5124 | #define REG_A6XX_TEX_CONST_8 0x00000008 |
4540 | #define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff | 5125 | #define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff |
4541 | #define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0 | 5126 | #define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0 |
4542 | static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val) | 5127 | static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val) |
4543 | { | 5128 | { |
4544 | return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK; | 5129 | return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK; |
4545 | } | 5130 | } |
4546 | 5131 | ||
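The BASE_HI to FLAG_HI rename makes dword 8 the upper half of the flag-buffer address, pairing it with the FLAG_LO field in TEX_CONST_7 above; a 17-bit high field plus a 32-bit low dword covers a 49-bit address. A minimal sketch, assuming FLAG_LO takes the low 32 bits (its definition sits above this hunk) and using a made-up address:

```c
/* Illustrative only: split an example 64-bit flag-buffer address. */
uint64_t flag_iova = 0x0001234500001000ULL;               /* example value  */
uint32_t lo = (uint32_t)flag_iova;                        /* -> TEX_CONST_7 */
uint32_t hi = A6XX_TEX_CONST_8_FLAG_HI(flag_iova >> 32);  /* -> TEX_CONST_8 */
/* hi == 0x00012345; the 0x0001ffff mask caps the upper half at 17 bits. */
```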
4547 | #define REG_A6XX_TEX_CONST_9 0x00000009 | 5132 | #define REG_A6XX_TEX_CONST_9 0x00000009 |
@@ -4558,5 +5143,227 @@ static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val) | |||
4558 | 5143 | ||
4559 | #define REG_A6XX_TEX_CONST_15 0x0000000f | 5144 | #define REG_A6XX_TEX_CONST_15 0x0000000f |
4560 | 5145 | ||
5146 | #define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140 | ||
5147 | |||
5148 | #define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148 | ||
5149 | |||
5150 | #define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540 | ||
5151 | |||
5152 | #define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541 | ||
5153 | |||
5154 | #define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542 | ||
5155 | |||
5156 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543 | ||
5157 | |||
5158 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544 | ||
5159 | |||
5160 | #define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545 | ||
5161 | |||
5162 | #define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572 | ||
5163 | |||
5164 | #define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573 | ||
5165 | |||
5166 | #define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574 | ||
5167 | |||
5168 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575 | ||
5169 | |||
5170 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576 | ||
5171 | |||
5172 | #define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577 | ||
5173 | |||
5174 | #define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4 | ||
5175 | |||
5176 | #define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5 | ||
5177 | |||
5178 | #define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6 | ||
5179 | |||
5180 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7 | ||
5181 | |||
5182 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8 | ||
5183 | |||
5184 | #define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9 | ||
5185 | |||
5186 | #define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6 | ||
5187 | |||
5188 | #define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7 | ||
5189 | |||
5190 | #define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8 | ||
5191 | |||
5192 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9 | ||
5193 | |||
5194 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da | ||
5195 | |||
5196 | #define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db | ||
5197 | |||
5198 | #define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000 | ||
5199 | |||
5200 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000 | ||
5201 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff | ||
5202 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0 | ||
5203 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val) | ||
5204 | { | ||
5205 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK; | ||
5206 | } | ||
5207 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00 | ||
5208 | #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8 | ||
5209 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val) | ||
5210 | { | ||
5211 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK; | ||
5212 | } | ||
5213 | |||
5214 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001 | ||
5215 | |||
5216 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002 | ||
5217 | |||
5218 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003 | ||
5219 | |||
5220 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004 | ||
5221 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f | ||
5222 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0 | ||
5223 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val) | ||
5224 | { | ||
5225 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK; | ||
5226 | } | ||
5227 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000 | ||
5228 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12 | ||
5229 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val) | ||
5230 | { | ||
5231 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK; | ||
5232 | } | ||
5233 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000 | ||
5234 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28 | ||
5235 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val) | ||
5236 | { | ||
5237 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK; | ||
5238 | } | ||
5239 | |||
5240 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005 | ||
5241 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000 | ||
5242 | #define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24 | ||
5243 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val) | ||
5244 | { | ||
5245 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK; | ||
5246 | } | ||
5247 | |||
5248 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008 | ||
5249 | |||
5250 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009 | ||
5251 | |||
5252 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a | ||
5253 | |||
5254 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b | ||
5255 | |||
5256 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c | ||
5257 | |||
5258 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d | ||
5259 | |||
5260 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e | ||
5261 | |||
5262 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f | ||
5263 | |||
5264 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010 | ||
5265 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f | ||
5266 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0 | ||
5267 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val) | ||
5268 | { | ||
5269 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK; | ||
5270 | } | ||
5271 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0 | ||
5272 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4 | ||
5273 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val) | ||
5274 | { | ||
5275 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK; | ||
5276 | } | ||
5277 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00 | ||
5278 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8 | ||
5279 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val) | ||
5280 | { | ||
5281 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK; | ||
5282 | } | ||
5283 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000 | ||
5284 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12 | ||
5285 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val) | ||
5286 | { | ||
5287 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK; | ||
5288 | } | ||
5289 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000 | ||
5290 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16 | ||
5291 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val) | ||
5292 | { | ||
5293 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK; | ||
5294 | } | ||
5295 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000 | ||
5296 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20 | ||
5297 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val) | ||
5298 | { | ||
5299 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK; | ||
5300 | } | ||
5301 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000 | ||
5302 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24 | ||
5303 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val) | ||
5304 | { | ||
5305 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK; | ||
5306 | } | ||
5307 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000 | ||
5308 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28 | ||
5309 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val) | ||
5310 | { | ||
5311 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK; | ||
5312 | } | ||
5313 | |||
5314 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011 | ||
5315 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f | ||
5316 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0 | ||
5317 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val) | ||
5318 | { | ||
5319 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK; | ||
5320 | } | ||
5321 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0 | ||
5322 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4 | ||
5323 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val) | ||
5324 | { | ||
5325 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK; | ||
5326 | } | ||
5327 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00 | ||
5328 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8 | ||
5329 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val) | ||
5330 | { | ||
5331 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK; | ||
5332 | } | ||
5333 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000 | ||
5334 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12 | ||
5335 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val) | ||
5336 | { | ||
5337 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK; | ||
5338 | } | ||
5339 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000 | ||
5340 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16 | ||
5341 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val) | ||
5342 | { | ||
5343 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK; | ||
5344 | } | ||
5345 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000 | ||
5346 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20 | ||
5347 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val) | ||
5348 | { | ||
5349 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK; | ||
5350 | } | ||
5351 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000 | ||
5352 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24 | ||
5353 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val) | ||
5354 | { | ||
5355 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK; | ||
5356 | } | ||
5357 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000 | ||
5358 | #define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28 | ||
5359 | static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val) | ||
5360 | { | ||
5361 | return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK; | ||
5362 | } | ||
5363 | |||
5364 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f | ||
5365 | |||
5366 | #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030 | ||
5367 | |||
4561 | 5368 | ||
4562 | #endif /* A6XX_XML */ | 5369 | #endif /* A6XX_XML */ |
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index bbb8126ec5c5..d4e98e5876bc 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c | |||
@@ -2,7 +2,6 @@ | |||
2 | /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ | 2 | /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ |
3 | 3 | ||
4 | #include <linux/clk.h> | 4 | #include <linux/clk.h> |
5 | #include <linux/iopoll.h> | ||
6 | #include <linux/pm_opp.h> | 5 | #include <linux/pm_opp.h> |
7 | #include <soc/qcom/cmd-db.h> | 6 | #include <soc/qcom/cmd-db.h> |
8 | 7 | ||
@@ -42,9 +41,6 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data) | |||
42 | status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); | 41 | status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); |
43 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); | 42 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); |
44 | 43 | ||
45 | if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ) | ||
46 | tasklet_schedule(&gmu->hfi_tasklet); | ||
47 | |||
48 | if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { | 44 | if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { |
49 | dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); | 45 | dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); |
50 | 46 | ||
@@ -65,12 +61,14 @@ static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) | |||
65 | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); | 61 | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); |
66 | } | 62 | } |
67 | 63 | ||
68 | static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) | 64 | static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) |
69 | { | 65 | { |
66 | int ret; | ||
67 | |||
70 | gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); | 68 | gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); |
71 | 69 | ||
72 | gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, | 70 | gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, |
73 | ((index << 24) & 0xff) | (3 & 0xf)); | 71 | ((3 & 0xf) << 28) | index); |
74 | 72 | ||
75 | /* | 73 | /* |
76 | * Send an invalid index as a vote for the bus bandwidth and let the | 74 | * Send an invalid index as a vote for the bus bandwidth and let the |
@@ -82,7 +80,37 @@ static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) | |||
82 | a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); | 80 | a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); |
83 | a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); | 81 | a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); |
84 | 82 | ||
85 | return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); | 83 | ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); |
84 | if (ret) | ||
85 | dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); | ||
86 | |||
87 | gmu->freq = gmu->gpu_freqs[index]; | ||
88 | } | ||
89 | |||
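The DCVS_PERF_SETTING write is the functional fix in this hunk: the old expression masked after shifting, so the performance index was always truncated away, and the "3" vote landed in the low nibble instead of the top one. A quick check with an example index:

```c
/* Illustrative check of the encoding fix, with index = 2. */
uint32_t index   = 2;
uint32_t old_val = ((index << 24) & 0xff) | (3 & 0xf); /* == 0x00000003, index lost  */
uint32_t new_val = ((3 & 0xf) << 28) | index;          /* == 0x30000002, vote in bits
                                                          31:28, index in the low bits */
```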
90 | void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) | ||
91 | { | ||
92 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | ||
93 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | ||
94 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | ||
95 | u32 perf_index = 0; | ||
96 | |||
97 | if (freq == gmu->freq) | ||
98 | return; | ||
99 | |||
100 | for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) | ||
101 | if (freq == gmu->gpu_freqs[perf_index]) | ||
102 | break; | ||
103 | |||
104 | __a6xx_gmu_set_freq(gmu, perf_index); | ||
105 | } | ||
106 | |||
107 | unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) | ||
108 | { | ||
109 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | ||
110 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | ||
111 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | ||
112 | |||
113 | return gmu->freq; | ||
86 | } | 114 | } |
87 | 115 | ||
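These new wrappers give the rest of the driver a frequency-based interface: a6xx_gmu_set_freq() returns early when the requested rate is already programmed, otherwise it scans gmu->gpu_freqs[] for an exact match, and because the loop stops at nr_gpu_freqs - 1 an unknown rate falls through to the last table entry. A minimal sketch of the same lookup on a made-up table:

```c
/* Illustrative only: the lookup from a6xx_gmu_set_freq() on an example
 * frequency table (values are invented). */
unsigned long gpu_freqs[] = { 257000000, 342000000, 414000000, 520000000 };
unsigned int  nr_gpu_freqs = 4, perf_index;
unsigned long freq = 999000000; /* not in the table */

for (perf_index = 0; perf_index < nr_gpu_freqs - 1; perf_index++)
	if (freq == gpu_freqs[perf_index])
		break;
/* perf_index == 3: an unmatched rate selects the last table entry. */
```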
88 | static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) | 116 | static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) |
@@ -135,9 +163,6 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) | |||
135 | u32 val; | 163 | u32 val; |
136 | int ret; | 164 | int ret; |
137 | 165 | ||
138 | gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, | ||
139 | A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0); | ||
140 | |||
141 | gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); | 166 | gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); |
142 | 167 | ||
143 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, | 168 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, |
@@ -348,8 +373,23 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) | |||
348 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); | 373 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); |
349 | } | 374 | } |
350 | 375 | ||
376 | static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) | ||
377 | { | ||
378 | return msm_writel(value, ptr + (offset << 2)); | ||
379 | } | ||
380 | |||
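The new pdc_write() takes the mapped PDC region directly and converts the register offset from 32-bit words to bytes with `offset << 2`; this matches the PDC register offsets earlier in this patch, which were rebased to be relative to the "gmu_pdc" and "gmu_pdc_seq" resources rather than to the GMU block. A minimal sketch of the offset arithmetic, using the rebased definitions:

```c
/* Illustrative only: byte offsets of consecutive SEQ_MEM words within the
 * mapped "gmu_pdc_seq" region (REG_A6XX_PDC_GPU_SEQ_MEM_0 == 0x00000000). */
uint32_t byte_off_0 = REG_A6XX_PDC_GPU_SEQ_MEM_0 << 2;       /* 0x0 */
uint32_t byte_off_1 = (REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1) << 2; /* 0x4 */
```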
381 | static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, | ||
382 | const char *name); | ||
383 | |||
351 | static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) | 384 | static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) |
352 | { | 385 | { |
386 | struct platform_device *pdev = to_platform_device(gmu->dev); | ||
387 | void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); | ||
388 | void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); | ||
389 | |||
390 | if (!pdcptr || !seqptr) | ||
391 | goto err; | ||
392 | |||
353 | /* Disable SDE clock gating */ | 393 | /* Disable SDE clock gating */ |
354 | gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); | 394 | gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); |
355 | 395 | ||
@@ -374,44 +414,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) | |||
374 | gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); | 414 | gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); |
375 | 415 | ||
376 | /* Load PDC sequencer uCode for power up and power down sequence */ | 416 | /* Load PDC sequencer uCode for power up and power down sequence */ |
377 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); | 417 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); |
378 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); | 418 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); |
379 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); | 419 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); |
380 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); | 420 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); |
381 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); | 421 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); |
382 | 422 | ||
383 | /* Set TCS commands used by PDC sequence for low power modes */ | 423 | /* Set TCS commands used by PDC sequence for low power modes */ |
384 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); | 424 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); |
385 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); | 425 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); |
386 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); | 426 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); |
387 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); | 427 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); |
388 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); | 428 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); |
389 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); | 429 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); |
390 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); | 430 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); |
391 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); | 431 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); |
392 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); | 432 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); |
393 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); | 433 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); |
394 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); | 434 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); |
395 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); | 435 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); |
396 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); | 436 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); |
397 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); | 437 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); |
398 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); | 438 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); |
399 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); | 439 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); |
400 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); | 440 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); |
401 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); | 441 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); |
402 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); | 442 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); |
403 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); | 443 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); |
404 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); | 444 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); |
405 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); | 445 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); |
406 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); | 446 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); |
407 | pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); | 447 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); |
408 | 448 | ||
409 | /* Setup GPU PDC */ | 449 | /* Setup GPU PDC */ |
410 | pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); | 450 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); |
411 | pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); | 451 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); |
412 | 452 | ||
413 | /* ensure no writes happen before the uCode is fully written */ | 453 | /* ensure no writes happen before the uCode is fully written */ |
414 | wmb(); | 454 | wmb(); |
455 | |||
456 | err: | ||
457 | devm_iounmap(gmu->dev, pdcptr); | ||
458 | devm_iounmap(gmu->dev, seqptr); | ||
415 | } | 459 | } |
416 | 460 | ||
417 | /* | 461 | /* |
@@ -547,8 +591,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) | |||
547 | } | 591 | } |
548 | 592 | ||
549 | #define A6XX_HFI_IRQ_MASK \ | 593 | #define A6XX_HFI_IRQ_MASK \ |
550 | (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \ | 594 | (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) |
551 | A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) | ||
552 | 595 | ||
553 | #define A6XX_GMU_IRQ_MASK \ | 596 | #define A6XX_GMU_IRQ_MASK \ |
554 | (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ | 597 | (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ |
@@ -626,7 +669,7 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu) | |||
626 | ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT); | 669 | ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT); |
627 | 670 | ||
628 | /* Set the GPU back to the highest power frequency */ | 671 | /* Set the GPU back to the highest power frequency */ |
629 | a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); | 672 | __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); |
630 | 673 | ||
631 | out: | 674 | out: |
632 | if (ret) | 675 | if (ret) |
@@ -665,7 +708,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) | |||
665 | ret = a6xx_hfi_start(gmu, status); | 708 | ret = a6xx_hfi_start(gmu, status); |
666 | 709 | ||
667 | /* Set the GPU to the highest power frequency */ | 710 | /* Set the GPU to the highest power frequency */ |
668 | a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); | 711 | __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); |
669 | 712 | ||
670 | out: | 713 | out: |
671 | /* Make sure to turn off the boot OOB request on error */ | 714 | /* Make sure to turn off the boot OOB request on error */ |
@@ -1140,7 +1183,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) | |||
1140 | 1183 | ||
1141 | gmu->dev = &pdev->dev; | 1184 | gmu->dev = &pdev->dev; |
1142 | 1185 | ||
1143 | of_dma_configure(gmu->dev, node, false); | 1186 | of_dma_configure(gmu->dev, node, true); |
1144 | 1187 | ||
1145 | /* For now, don't do anything fancy until we get our feet under us */ | 1188 | /* For now, don't do anything fancy until we get our feet under us */ |
1146 | gmu->idle_level = GMU_IDLE_STATE_ACTIVE; | 1189 | gmu->idle_level = GMU_IDLE_STATE_ACTIVE; |
@@ -1170,11 +1213,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) | |||
1170 | 1213 | ||
1171 | /* Map the GMU registers */ | 1214 | /* Map the GMU registers */ |
1172 | gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); | 1215 | gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); |
1173 | 1216 | if (IS_ERR(gmu->mmio)) | |
1174 | /* Map the GPU power domain controller registers */ | ||
1175 | gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); | ||
1176 | |||
1177 | if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio)) | ||
1178 | goto err; | 1217 | goto err; |
1179 | 1218 | ||
1180 | /* Get the HFI and GMU interrupts */ | 1219 | /* Get the HFI and GMU interrupts */ |
@@ -1184,9 +1223,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) | |||
1184 | if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) | 1223 | if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) |
1185 | goto err; | 1224 | goto err; |
1186 | 1225 | ||
1187 | /* Set up a tasklet to handle GMU HFI responses */ | ||
1188 | tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu); | ||
1189 | |||
1190 | /* Get the power levels for the GMU and GPU */ | 1226 | /* Get the power levels for the GMU and GPU */ |
1191 | a6xx_gmu_pwrlevels_probe(gmu); | 1227 | a6xx_gmu_pwrlevels_probe(gmu); |
1192 | 1228 | ||
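A minimal stand-alone sketch of the corrected REG_A6XX_GMU_DCVS_PERF_SETTING encoding used above, assuming the layout the new code implies (perf index in the low bits, vote option 3 in the top nibble); the helper below is illustrative only and not part of the driver:

#include <stdint.h>
#include <stdio.h>

/*
 * Encode the DCVS perf vote the way the new hunk does.  The old
 * expression ((index << 24) & 0xff) always evaluated to 0, so the
 * register value never depended on the requested index.
 */
static unsigned int dcvs_perf_setting(unsigned int index)
{
	return ((3u & 0xf) << 28) | index;
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("index %u -> 0x%08x\n", i, dcvs_perf_setting(i));
	return 0;
}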
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index d9a386c18799..35f765afae45 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #ifndef _A6XX_GMU_H_ | 4 | #ifndef _A6XX_GMU_H_ |
5 | #define _A6XX_GMU_H_ | 5 | #define _A6XX_GMU_H_ |
6 | 6 | ||
7 | #include <linux/iopoll.h> | ||
7 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
8 | #include "msm_drv.h" | 9 | #include "msm_drv.h" |
9 | #include "a6xx_hfi.h" | 10 | #include "a6xx_hfi.h" |
@@ -47,7 +48,6 @@ struct a6xx_gmu { | |||
47 | struct device *dev; | 48 | struct device *dev; |
48 | 49 | ||
49 | void * __iomem mmio; | 50 | void * __iomem mmio; |
50 | void * __iomem pdc_mmio; | ||
51 | 51 | ||
52 | int hfi_irq; | 52 | int hfi_irq; |
53 | int gmu_irq; | 53 | int gmu_irq; |
@@ -74,6 +74,8 @@ struct a6xx_gmu { | |||
74 | unsigned long gmu_freqs[4]; | 74 | unsigned long gmu_freqs[4]; |
75 | u32 cx_arc_votes[4]; | 75 | u32 cx_arc_votes[4]; |
76 | 76 | ||
77 | unsigned long freq; | ||
78 | |||
77 | struct a6xx_hfi_queue queues[2]; | 79 | struct a6xx_hfi_queue queues[2]; |
78 | 80 | ||
79 | struct tasklet_struct hfi_tasklet; | 81 | struct tasklet_struct hfi_tasklet; |
@@ -89,11 +91,6 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value) | |||
89 | return msm_writel(value, gmu->mmio + (offset << 2)); | 91 | return msm_writel(value, gmu->mmio + (offset << 2)); |
90 | } | 92 | } |
91 | 93 | ||
92 | static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value) | ||
93 | { | ||
94 | return msm_writel(value, gmu->pdc_mmio + (offset << 2)); | ||
95 | } | ||
96 | |||
97 | static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or) | 94 | static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or) |
98 | { | 95 | { |
99 | u32 val = gmu_read(gmu, reg); | 96 | u32 val = gmu_read(gmu, reg); |
@@ -103,6 +100,16 @@ static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or) | |||
103 | gmu_write(gmu, reg, val | or); | 100 | gmu_write(gmu, reg, val | or); |
104 | } | 101 | } |
105 | 102 | ||
103 | static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) | ||
104 | { | ||
105 | u64 val; | ||
106 | |||
107 | val = (u64) msm_readl(gmu->mmio + (lo << 2)); | ||
108 | val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32); | ||
109 | |||
110 | return val; | ||
111 | } | ||
112 | |||
106 | #define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \ | 113 | #define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \ |
107 | readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \ | 114 | readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \ |
108 | interval, timeout) | 115 | interval, timeout) |
@@ -157,6 +164,4 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu); | |||
157 | int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); | 164 | int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); |
158 | void a6xx_hfi_stop(struct a6xx_gmu *gmu); | 165 | void a6xx_hfi_stop(struct a6xx_gmu *gmu); |
159 | 166 | ||
160 | void a6xx_hfi_task(unsigned long data); | ||
161 | |||
162 | #endif | 167 | #endif |
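The new gmu_read64() helper above pairs two 32-bit register reads into one 64-bit counter value. A self-contained sketch of the same pattern, with read32() standing in for msm_readl() on the mapped GMU registers (register contents below are made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2] = { 0xdeadbeef, 0x00000012 };

/* Stand-in for msm_readl() on the lo/hi counter registers. */
static uint32_t read32(unsigned int offset)
{
	return fake_regs[offset];
}

static uint64_t read64(unsigned int lo, unsigned int hi)
{
	uint64_t val = read32(lo);

	val |= (uint64_t)read32(hi) << 32;
	return val;
}

int main(void)
{
	printf("counter = 0x%016llx\n", (unsigned long long)read64(0, 1));
	return 0;
}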
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h index ef68098d2adc..db56f263ed77 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | |||
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: | |||
12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) | 12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) |
13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) | 13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) |
14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) | 14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) |
15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45) | 15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37) |
16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) | 16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) |
17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) | 17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) |
18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45) | 18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37) |
19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45) | 19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42) |
20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13) | 20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) |
21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) | 21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) |
22 | 22 | ||
23 | Copyright (C) 2013-2018 by the following authors: | 23 | Copyright (C) 2013-2018 by the following authors: |
@@ -167,8 +167,8 @@ static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_ | |||
167 | #define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0 | 167 | #define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0 |
168 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001 | 168 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001 |
169 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002 | 169 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002 |
170 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004 | 170 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004 |
171 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008 | 171 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008 |
172 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010 | 172 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010 |
173 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020 | 173 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020 |
174 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040 | 174 | #define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040 |
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c629f742a1d1..cdc3d59a659d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include "a6xx_gpu.h" | 7 | #include "a6xx_gpu.h" |
8 | #include "a6xx_gmu.xml.h" | 8 | #include "a6xx_gmu.xml.h" |
9 | 9 | ||
10 | #include <linux/devfreq.h> | ||
11 | |||
10 | static inline bool _a6xx_check_idle(struct msm_gpu *gpu) | 12 | static inline bool _a6xx_check_idle(struct msm_gpu *gpu) |
11 | { | 13 | { |
12 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 14 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
@@ -438,10 +440,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu) | |||
438 | gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d)); | 440 | gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d)); |
439 | gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76)); | 441 | gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76)); |
440 | gpu_write(gpu, REG_A6XX_CP_PROTECT(24), | 442 | gpu_write(gpu, REG_A6XX_CP_PROTECT(24), |
441 | A6XX_PROTECT_RDONLY(0x8d0, 0x23)); | ||
442 | gpu_write(gpu, REG_A6XX_CP_PROTECT(25), | ||
443 | A6XX_PROTECT_RDONLY(0x980, 0x4)); | 443 | A6XX_PROTECT_RDONLY(0x980, 0x4)); |
444 | gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0)); | 444 | gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0)); |
445 | 445 | ||
446 | /* Enable interrupts */ | 446 | /* Enable interrupts */ |
447 | gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK); | 447 | gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK); |
@@ -682,6 +682,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) | |||
682 | 682 | ||
683 | gpu->needs_hw_init = true; | 683 | gpu->needs_hw_init = true; |
684 | 684 | ||
685 | msm_gpu_resume_devfreq(gpu); | ||
686 | |||
685 | return ret; | 687 | return ret; |
686 | } | 688 | } |
687 | 689 | ||
@@ -690,6 +692,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) | |||
690 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | 692 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
691 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | 693 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
692 | 694 | ||
695 | devfreq_suspend_device(gpu->devfreq.devfreq); | ||
696 | |||
693 | /* | 697 | /* |
694 | * Make sure the GMU is idle before continuing (because some transitions | 698 | * Make sure the GMU is idle before continuing (because some transitions |
695 | * may use VBIF | 699 | * may use VBIF |
@@ -744,7 +748,7 @@ static void a6xx_destroy(struct msm_gpu *gpu) | |||
744 | if (a6xx_gpu->sqe_bo) { | 748 | if (a6xx_gpu->sqe_bo) { |
745 | if (a6xx_gpu->sqe_iova) | 749 | if (a6xx_gpu->sqe_iova) |
746 | msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace); | 750 | msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace); |
747 | drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo); | 751 | drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo); |
748 | } | 752 | } |
749 | 753 | ||
750 | a6xx_gmu_remove(a6xx_gpu); | 754 | a6xx_gmu_remove(a6xx_gpu); |
@@ -753,6 +757,24 @@ static void a6xx_destroy(struct msm_gpu *gpu) | |||
753 | kfree(a6xx_gpu); | 757 | kfree(a6xx_gpu); |
754 | } | 758 | } |
755 | 759 | ||
760 | static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) | ||
761 | { | ||
762 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | ||
763 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | ||
764 | u64 busy_cycles; | ||
765 | unsigned long busy_time; | ||
766 | |||
767 | busy_cycles = gmu_read64(&a6xx_gpu->gmu, | ||
768 | REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, | ||
769 | REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); | ||
770 | |||
771 | busy_time = ((busy_cycles - gpu->devfreq.busy_cycles) * 10) / 192; | ||
772 | |||
773 | gpu->devfreq.busy_cycles = busy_cycles; | ||
774 | |||
775 | return busy_time; | ||
776 | } | ||
777 | |||
756 | static const struct adreno_gpu_funcs funcs = { | 778 | static const struct adreno_gpu_funcs funcs = { |
757 | .base = { | 779 | .base = { |
758 | .get_param = adreno_get_param, | 780 | .get_param = adreno_get_param, |
@@ -768,6 +790,9 @@ static const struct adreno_gpu_funcs funcs = { | |||
768 | #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) | 790 | #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) |
769 | .show = a6xx_show, | 791 | .show = a6xx_show, |
770 | #endif | 792 | #endif |
793 | .gpu_busy = a6xx_gpu_busy, | ||
794 | .gpu_get_freq = a6xx_gmu_get_freq, | ||
795 | .gpu_set_freq = a6xx_gmu_set_freq, | ||
771 | }, | 796 | }, |
772 | .get_timestamp = a6xx_get_timestamp, | 797 | .get_timestamp = a6xx_get_timestamp, |
773 | }; | 798 | }; |
@@ -799,7 +824,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) | |||
799 | } | 824 | } |
800 | 825 | ||
801 | /* Check if there is a GMU phandle and set it up */ | 826 | /* Check if there is a GMU phandle and set it up */ |
802 | node = of_parse_phandle(pdev->dev.of_node, "gmu", 0); | 827 | node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); |
803 | 828 | ||
804 | /* FIXME: How do we gracefully handle this? */ | 829 | /* FIXME: How do we gracefully handle this? */ |
805 | BUG_ON(!node); | 830 | BUG_ON(!node); |
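a6xx_gpu_busy() above converts a cycle delta from the XOCLK power counter into busy time with (delta * 10) / 192; assuming the counter ticks at the 19.2 MHz crystal, that yields microseconds of busy time for devfreq to weigh against its polling interval. A small sketch of the arithmetic (values illustrative):

#include <stdint.h>
#include <stdio.h>

/* (cycles at 19.2 MHz) * 10 / 192 == microseconds */
static unsigned long busy_us(uint64_t now, uint64_t last)
{
	return (unsigned long)(((now - last) * 10) / 192);
}

int main(void)
{
	/* 19,200,000 cycles is one second, i.e. 1,000,000 us of busy time. */
	printf("%lu us\n", busy_us(19200000ULL, 0));
	return 0;
}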
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index dd69e5b0e692..4127dcebc202 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h | |||
@@ -56,5 +56,6 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); | |||
56 | 56 | ||
57 | int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node); | 57 | int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node); |
58 | void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); | 58 | void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); |
59 | 59 | void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq); | |
60 | unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu); | ||
60 | #endif /* __A6XX_GPU_H__ */ | 61 | #endif /* __A6XX_GPU_H__ */ |
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index f19ef4cb6ea4..6ff9baec2658 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c | |||
@@ -79,83 +79,72 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, | |||
79 | return 0; | 79 | return 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | struct a6xx_hfi_response { | 82 | static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, |
83 | u32 id; | 83 | u32 *payload, u32 payload_size) |
84 | u32 seqnum; | 84 | { |
85 | struct list_head node; | 85 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; |
86 | struct completion complete; | 86 | u32 val; |
87 | 87 | int ret; | |
88 | u32 error; | ||
89 | u32 payload[16]; | ||
90 | }; | ||
91 | 88 | ||
92 | /* | 89 | /* Wait for a response */ |
93 | * Incoming HFI ack messages can come in out of order so we need to store all | 90 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, |
94 | * the pending messages on a list until they are handled. | 91 | val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000); |
95 | */ | ||
96 | static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock); | ||
97 | static LIST_HEAD(hfi_ack_list); | ||
98 | 92 | ||
99 | static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu, | 93 | if (ret) { |
100 | struct a6xx_hfi_msg_response *msg) | 94 | dev_err(gmu->dev, |
101 | { | 95 | "Message %s id %d timed out waiting for response\n", |
102 | struct a6xx_hfi_response *resp; | 96 | a6xx_hfi_msg_id[id], seqnum); |
103 | u32 id, seqnum; | 97 | return -ETIMEDOUT; |
104 | |||
105 | /* msg->ret_header contains the header of the message being acked */ | ||
106 | id = HFI_HEADER_ID(msg->ret_header); | ||
107 | seqnum = HFI_HEADER_SEQNUM(msg->ret_header); | ||
108 | |||
109 | spin_lock(&hfi_ack_lock); | ||
110 | list_for_each_entry(resp, &hfi_ack_list, node) { | ||
111 | if (resp->id == id && resp->seqnum == seqnum) { | ||
112 | resp->error = msg->error; | ||
113 | memcpy(resp->payload, msg->payload, | ||
114 | sizeof(resp->payload)); | ||
115 | |||
116 | complete(&resp->complete); | ||
117 | spin_unlock(&hfi_ack_lock); | ||
118 | return; | ||
119 | } | ||
120 | } | 98 | } |
121 | spin_unlock(&hfi_ack_lock); | ||
122 | 99 | ||
123 | dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum); | 100 | /* Clear the interrupt */ |
124 | } | 101 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, |
102 | A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ); | ||
125 | 103 | ||
126 | static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu, | 104 | for (;;) { |
127 | struct a6xx_hfi_msg_response *msg) | 105 | struct a6xx_hfi_msg_response resp; |
128 | { | ||
129 | struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg; | ||
130 | 106 | ||
131 | dev_err(gmu->dev, "GMU firmware error %d\n", error->code); | 107 | /* Get the next packet */ |
132 | } | 108 | ret = a6xx_hfi_queue_read(queue, (u32 *) &resp, |
109 | sizeof(resp) >> 2); | ||
133 | 110 | ||
134 | void a6xx_hfi_task(unsigned long data) | 111 | /* If the queue is empty our response never made it */ |
135 | { | 112 | if (!ret) { |
136 | struct a6xx_gmu *gmu = (struct a6xx_gmu *) data; | 113 | dev_err(gmu->dev, |
137 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; | 114 | "The HFI response queue is unexpectedly empty\n"); |
138 | struct a6xx_hfi_msg_response resp; | ||
139 | 115 | ||
140 | for (;;) { | 116 | return -ENOENT; |
141 | u32 id; | 117 | } |
142 | int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp, | 118 | |
143 | sizeof(resp) >> 2); | 119 | if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) { |
120 | struct a6xx_hfi_msg_error *error = | ||
121 | (struct a6xx_hfi_msg_error *) &resp; | ||
144 | 122 | ||
145 | /* Returns the number of bytes copied or negative on error */ | 123 | dev_err(gmu->dev, "GMU firmware error %d\n", |
146 | if (ret <= 0) { | 124 | error->code); |
147 | if (ret < 0) | 125 | continue; |
148 | dev_err(gmu->dev, | 126 | } |
149 | "Unable to read the HFI message queue\n"); | 127 | |
150 | break; | 128 | if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) { |
129 | dev_err(gmu->dev, | ||
130 | "Unexpected message id %d on the response queue\n", | ||
131 | HFI_HEADER_SEQNUM(resp.ret_header)); | ||
132 | continue; | ||
133 | } | ||
134 | |||
135 | if (resp.error) { | ||
136 | dev_err(gmu->dev, | ||
137 | "Message %s id %d returned error %d\n", | ||
138 | a6xx_hfi_msg_id[id], seqnum, resp.error); | ||
139 | return -EINVAL; | ||
151 | } | 140 | } |
152 | 141 | ||
153 | id = HFI_HEADER_ID(resp.header); | 142 | /* All is well, copy over the buffer */ |
143 | if (payload && payload_size) | ||
144 | memcpy(payload, resp.payload, | ||
145 | min_t(u32, payload_size, sizeof(resp.payload))); | ||
154 | 146 | ||
155 | if (id == HFI_F2H_MSG_ACK) | 147 | return 0; |
156 | a6xx_hfi_handle_ack(gmu, &resp); | ||
157 | else if (id == HFI_F2H_MSG_ERROR) | ||
158 | a6xx_hfi_handle_error(gmu, &resp); | ||
159 | } | 148 | } |
160 | } | 149 | } |
161 | 150 | ||
@@ -163,7 +152,6 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, | |||
163 | void *data, u32 size, u32 *payload, u32 payload_size) | 152 | void *data, u32 size, u32 *payload, u32 payload_size) |
164 | { | 153 | { |
165 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; | 154 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; |
166 | struct a6xx_hfi_response resp = { 0 }; | ||
167 | int ret, dwords = size >> 2; | 155 | int ret, dwords = size >> 2; |
168 | u32 seqnum; | 156 | u32 seqnum; |
169 | 157 | ||
@@ -173,53 +161,14 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, | |||
173 | *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) | | 161 | *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) | |
174 | (dwords << 8) | id; | 162 | (dwords << 8) | id; |
175 | 163 | ||
176 | init_completion(&resp.complete); | ||
177 | resp.id = id; | ||
178 | resp.seqnum = seqnum; | ||
179 | |||
180 | spin_lock_bh(&hfi_ack_lock); | ||
181 | list_add_tail(&resp.node, &hfi_ack_list); | ||
182 | spin_unlock_bh(&hfi_ack_lock); | ||
183 | |||
184 | ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); | 164 | ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); |
185 | if (ret) { | 165 | if (ret) { |
186 | dev_err(gmu->dev, "Unable to send message %s id %d\n", | 166 | dev_err(gmu->dev, "Unable to send message %s id %d\n", |
187 | a6xx_hfi_msg_id[id], seqnum); | 167 | a6xx_hfi_msg_id[id], seqnum); |
188 | goto out; | ||
189 | } | ||
190 | |||
191 | /* Wait up to 5 seconds for the response */ | ||
192 | ret = wait_for_completion_timeout(&resp.complete, | ||
193 | msecs_to_jiffies(5000)); | ||
194 | if (!ret) { | ||
195 | dev_err(gmu->dev, | ||
196 | "Message %s id %d timed out waiting for response\n", | ||
197 | a6xx_hfi_msg_id[id], seqnum); | ||
198 | ret = -ETIMEDOUT; | ||
199 | } else | ||
200 | ret = 0; | ||
201 | |||
202 | out: | ||
203 | spin_lock_bh(&hfi_ack_lock); | ||
204 | list_del(&resp.node); | ||
205 | spin_unlock_bh(&hfi_ack_lock); | ||
206 | |||
207 | if (ret) | ||
208 | return ret; | 168 | return ret; |
209 | |||
210 | if (resp.error) { | ||
211 | dev_err(gmu->dev, "Message %s id %d returned error %d\n", | ||
212 | a6xx_hfi_msg_id[id], seqnum, resp.error); | ||
213 | return -EINVAL; | ||
214 | } | 169 | } |
215 | 170 | ||
216 | if (payload && payload_size) { | 171 | return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size); |
217 | int copy = min_t(u32, payload_size, sizeof(resp.payload)); | ||
218 | |||
219 | memcpy(payload, resp.payload, copy); | ||
220 | } | ||
221 | |||
222 | return 0; | ||
223 | } | 172 | } |
224 | 173 | ||
225 | static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) | 174 | static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) |
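The hunks above drop the tasklet/completion machinery and wait for HFI acks synchronously: poll the MSGQ interrupt, then drain the response queue, skipping firmware error packets and stale sequence numbers until the expected ack arrives. A toy model of that drain-and-match loop (types and queue contents invented for illustration):

#include <stdio.h>

struct resp {
	int is_error;          /* firmware error packet rather than an ack */
	unsigned int seqnum;
	int error;             /* error code carried by the ack */
};

/* Pretend response queue: an error packet, a stale ack, then ours. */
static const struct resp queue[] = {
	{ .is_error = 1 },
	{ .seqnum = 6 },
	{ .seqnum = 7, .error = 0 },
};

static int wait_for_ack(unsigned int seqnum)
{
	for (size_t i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		const struct resp *r = &queue[i];

		if (r->is_error || r->seqnum != seqnum)
			continue;       /* not ours, keep draining */
		return r->error ? -1 : 0;
	}
	return -1;                      /* queue ran dry before our ack */
}

int main(void)
{
	printf("ack for seqnum 7: %d\n", wait_for_ack(7));
	return 0;
}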
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index 5dace1350810..1318959d504d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h | |||
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: | |||
12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) | 12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) |
13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) | 13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) |
14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) | 14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) |
15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45) | 15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37) |
16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) | 16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) |
17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) | 17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) |
18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45) | 18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37) |
19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45) | 19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42) |
20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13) | 20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) |
21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) | 21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) |
22 | 22 | ||
23 | Copyright (C) 2013-2018 by the following authors: | 23 | Copyright (C) 2013-2018 by the following authors: |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 7d3e9a129ac7..86abdb2b3a9c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c | |||
@@ -120,6 +120,7 @@ static const struct adreno_info gpulist[] = { | |||
120 | [ADRENO_FW_GMU] = "a630_gmu.bin", | 120 | [ADRENO_FW_GMU] = "a630_gmu.bin", |
121 | }, | 121 | }, |
122 | .gmem = SZ_1M, | 122 | .gmem = SZ_1M, |
123 | .inactive_period = DRM_MSM_INACTIVE_PERIOD, | ||
123 | .init = a6xx_gpu_init, | 124 | .init = a6xx_gpu_init, |
124 | }, | 125 | }, |
125 | }; | 126 | }; |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 03a91e10b310..15eb03bed984 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | |||
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: | |||
12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) | 12 | - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13) |
13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) | 13 | - /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13) |
14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) | 14 | - /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13) |
15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45) | 15 | - /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37) |
16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) | 16 | - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) |
17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) | 17 | - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) |
18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45) | 18 | - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37) |
19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45) | 19 | - /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42) |
20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13) | 20 | - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) |
21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) | 21 | - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) |
22 | 22 | ||
23 | Copyright (C) 2013-2018 by the following authors: | 23 | Copyright (C) 2013-2018 by the following authors: |
@@ -237,7 +237,7 @@ enum adreno_pm4_type3_packets { | |||
237 | CP_UNK_A6XX_14 = 20, | 237 | CP_UNK_A6XX_14 = 20, |
238 | CP_UNK_A6XX_36 = 54, | 238 | CP_UNK_A6XX_36 = 54, |
239 | CP_UNK_A6XX_55 = 85, | 239 | CP_UNK_A6XX_55 = 85, |
240 | UNK_A6XX_6D = 109, | 240 | CP_REG_WRITE = 109, |
241 | }; | 241 | }; |
242 | 242 | ||
243 | enum adreno_state_block { | 243 | enum adreno_state_block { |
@@ -968,19 +968,19 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val) | |||
968 | } | 968 | } |
969 | 969 | ||
970 | #define REG_CP_SET_BIN_DATA5_5 0x00000005 | 970 | #define REG_CP_SET_BIN_DATA5_5 0x00000005 |
971 | #define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff | 971 | #define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff |
972 | #define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0 | 972 | #define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0 |
973 | static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val) | 973 | static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val) |
974 | { | 974 | { |
975 | return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK; | 975 | return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK; |
976 | } | 976 | } |
977 | 977 | ||
978 | #define REG_CP_SET_BIN_DATA5_6 0x00000006 | 978 | #define REG_CP_SET_BIN_DATA5_6 0x00000006 |
979 | #define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff | 979 | #define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff |
980 | #define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0 | 980 | #define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0 |
981 | static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val) | 981 | static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val) |
982 | { | 982 | { |
983 | return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK; | 983 | return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK; |
984 | } | 984 | } |
985 | 985 | ||
986 | #define REG_CP_REG_TO_MEM_0 0x00000000 | 986 | #define REG_CP_REG_TO_MEM_0 0x00000000 |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 80cbf75bc2ff..d4530d60767b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | |||
@@ -47,237 +47,17 @@ | |||
47 | #define LEFT_MIXER 0 | 47 | #define LEFT_MIXER 0 |
48 | #define RIGHT_MIXER 1 | 48 | #define RIGHT_MIXER 1 |
49 | 49 | ||
50 | #define MISR_BUFF_SIZE 256 | 50 | static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate, |
51 | 51 | struct drm_display_mode *mode) | |
52 | static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) | ||
53 | { | ||
54 | struct msm_drm_private *priv; | ||
55 | |||
56 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { | ||
57 | DPU_ERROR("invalid crtc\n"); | ||
58 | return NULL; | ||
59 | } | ||
60 | priv = crtc->dev->dev_private; | ||
61 | if (!priv || !priv->kms) { | ||
62 | DPU_ERROR("invalid kms\n"); | ||
63 | return NULL; | ||
64 | } | ||
65 | |||
66 | return to_dpu_kms(priv->kms); | ||
67 | } | ||
68 | |||
69 | static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable) | ||
70 | { | ||
71 | struct drm_crtc *crtc; | ||
72 | struct msm_drm_private *priv; | ||
73 | struct dpu_kms *dpu_kms; | ||
74 | |||
75 | if (!dpu_crtc) { | ||
76 | DPU_ERROR("invalid dpu crtc\n"); | ||
77 | return -EINVAL; | ||
78 | } | ||
79 | |||
80 | crtc = &dpu_crtc->base; | ||
81 | if (!crtc->dev || !crtc->dev->dev_private) { | ||
82 | DPU_ERROR("invalid drm device\n"); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | priv = crtc->dev->dev_private; | ||
87 | if (!priv->kms) { | ||
88 | DPU_ERROR("invalid kms\n"); | ||
89 | return -EINVAL; | ||
90 | } | ||
91 | |||
92 | dpu_kms = to_dpu_kms(priv->kms); | ||
93 | |||
94 | if (enable) | ||
95 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | ||
96 | else | ||
97 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * _dpu_crtc_rp_to_crtc - get crtc from resource pool object | ||
104 | * @rp: Pointer to resource pool | ||
105 | * return: Pointer to drm crtc if success; null otherwise | ||
106 | */ | ||
107 | static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp) | ||
108 | { | ||
109 | if (!rp) | ||
110 | return NULL; | ||
111 | |||
112 | return container_of(rp, struct dpu_crtc_state, rp)->base.crtc; | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool | ||
117 | * @rp: Pointer to resource pool | ||
118 | * @force: True to reclaim all resources; otherwise, reclaim only unused ones | ||
119 | * return: None | ||
120 | */ | ||
121 | static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force) | ||
122 | { | 52 | { |
123 | struct dpu_crtc_res *res, *next; | 53 | return mode->hdisplay / cstate->num_mixers; |
124 | struct drm_crtc *crtc; | ||
125 | |||
126 | crtc = _dpu_crtc_rp_to_crtc(rp); | ||
127 | if (!crtc) { | ||
128 | DPU_ERROR("invalid crtc\n"); | ||
129 | return; | ||
130 | } | ||
131 | |||
132 | DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id, | ||
133 | force ? "destroy" : "free_unused"); | ||
134 | |||
135 | list_for_each_entry_safe(res, next, &rp->res_list, list) { | ||
136 | if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE)) | ||
137 | continue; | ||
138 | DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n", | ||
139 | crtc->base.id, rp->sequence_id, | ||
140 | res->type, res->tag, res->val, | ||
141 | atomic_read(&res->refcount)); | ||
142 | list_del(&res->list); | ||
143 | if (res->ops.put) | ||
144 | res->ops.put(res->val); | ||
145 | kfree(res); | ||
146 | } | ||
147 | } | 54 | } |
148 | 55 | ||
149 | /** | 56 | static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) |
150 | * _dpu_crtc_rp_free_unused - free unused resource in pool | ||
151 | * @rp: Pointer to resource pool | ||
152 | * return: none | ||
153 | */ | ||
154 | static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp) | ||
155 | { | ||
156 | mutex_lock(rp->rp_lock); | ||
157 | _dpu_crtc_rp_reclaim(rp, false); | ||
158 | mutex_unlock(rp->rp_lock); | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * _dpu_crtc_rp_destroy - destroy resource pool | ||
163 | * @rp: Pointer to resource pool | ||
164 | * return: None | ||
165 | */ | ||
166 | static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp) | ||
167 | { | ||
168 | mutex_lock(rp->rp_lock); | ||
169 | list_del_init(&rp->rp_list); | ||
170 | _dpu_crtc_rp_reclaim(rp, true); | ||
171 | mutex_unlock(rp->rp_lock); | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * _dpu_crtc_hw_blk_get - get callback for hardware block | ||
176 | * @val: Resource handle | ||
177 | * @type: Resource type | ||
178 | * @tag: Search tag for given resource | ||
179 | * return: Resource handle | ||
180 | */ | ||
181 | static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag) | ||
182 | { | ||
183 | DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val); | ||
184 | return dpu_hw_blk_get(val, type, tag); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * _dpu_crtc_hw_blk_put - put callback for hardware block | ||
189 | * @val: Resource handle | ||
190 | * return: None | ||
191 | */ | ||
192 | static void _dpu_crtc_hw_blk_put(void *val) | ||
193 | { | ||
194 | DPU_DEBUG("res://%pK\n", val); | ||
195 | dpu_hw_blk_put(val); | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count | ||
200 | * @rp: Pointer to original resource pool | ||
201 | * @dup_rp: Pointer to duplicated resource pool | ||
202 | * return: None | ||
203 | */ | ||
204 | static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp, | ||
205 | struct dpu_crtc_respool *dup_rp) | ||
206 | { | ||
207 | struct dpu_crtc_res *res, *dup_res; | ||
208 | struct drm_crtc *crtc; | ||
209 | |||
210 | if (!rp || !dup_rp || !rp->rp_head) { | ||
211 | DPU_ERROR("invalid resource pool\n"); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | crtc = _dpu_crtc_rp_to_crtc(rp); | ||
216 | if (!crtc) { | ||
217 | DPU_ERROR("invalid crtc\n"); | ||
218 | return; | ||
219 | } | ||
220 | |||
221 | DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id); | ||
222 | |||
223 | mutex_lock(rp->rp_lock); | ||
224 | dup_rp->sequence_id = rp->sequence_id + 1; | ||
225 | INIT_LIST_HEAD(&dup_rp->res_list); | ||
226 | dup_rp->ops = rp->ops; | ||
227 | list_for_each_entry(res, &rp->res_list, list) { | ||
228 | dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL); | ||
229 | if (!dup_res) { | ||
230 | mutex_unlock(rp->rp_lock); | ||
231 | return; | ||
232 | } | ||
233 | INIT_LIST_HEAD(&dup_res->list); | ||
234 | atomic_set(&dup_res->refcount, 0); | ||
235 | dup_res->type = res->type; | ||
236 | dup_res->tag = res->tag; | ||
237 | dup_res->val = res->val; | ||
238 | dup_res->ops = res->ops; | ||
239 | dup_res->flags = DPU_CRTC_RES_FLAG_FREE; | ||
240 | DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n", | ||
241 | crtc->base.id, dup_rp->sequence_id, | ||
242 | dup_res->type, dup_res->tag, dup_res->val, | ||
243 | atomic_read(&dup_res->refcount)); | ||
244 | list_add_tail(&dup_res->list, &dup_rp->res_list); | ||
245 | if (dup_res->ops.get) | ||
246 | dup_res->ops.get(dup_res->val, 0, -1); | ||
247 | } | ||
248 | |||
249 | dup_rp->rp_lock = rp->rp_lock; | ||
250 | dup_rp->rp_head = rp->rp_head; | ||
251 | INIT_LIST_HEAD(&dup_rp->rp_list); | ||
252 | list_add_tail(&dup_rp->rp_list, rp->rp_head); | ||
253 | mutex_unlock(rp->rp_lock); | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * _dpu_crtc_rp_reset - reset resource pool after allocation | ||
258 | * @rp: Pointer to original resource pool | ||
259 | * @rp_lock: Pointer to serialization resource pool lock | ||
260 | * @rp_head: Pointer to crtc resource pool head | ||
261 | * return: None | ||
262 | */ | ||
263 | static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp, | ||
264 | struct mutex *rp_lock, struct list_head *rp_head) | ||
265 | { | 57 | { |
266 | if (!rp || !rp_lock || !rp_head) { | 58 | struct msm_drm_private *priv = crtc->dev->dev_private; |
267 | DPU_ERROR("invalid resource pool\n"); | ||
268 | return; | ||
269 | } | ||
270 | 59 | ||
271 | mutex_lock(rp_lock); | 60 | return to_dpu_kms(priv->kms); |
272 | rp->rp_lock = rp_lock; | ||
273 | rp->rp_head = rp_head; | ||
274 | INIT_LIST_HEAD(&rp->rp_list); | ||
275 | rp->sequence_id = 0; | ||
276 | INIT_LIST_HEAD(&rp->res_list); | ||
277 | rp->ops.get = _dpu_crtc_hw_blk_get; | ||
278 | rp->ops.put = _dpu_crtc_hw_blk_put; | ||
279 | list_add_tail(&rp->rp_list, rp->rp_head); | ||
280 | mutex_unlock(rp_lock); | ||
281 | } | 61 | } |
282 | 62 | ||
283 | static void dpu_crtc_destroy(struct drm_crtc *crtc) | 63 | static void dpu_crtc_destroy(struct drm_crtc *crtc) |
@@ -297,14 +77,29 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc) | |||
297 | } | 77 | } |
298 | 78 | ||
299 | static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, | 79 | static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, |
300 | struct dpu_plane_state *pstate) | 80 | struct dpu_plane_state *pstate, struct dpu_format *format) |
301 | { | 81 | { |
302 | struct dpu_hw_mixer *lm = mixer->hw_lm; | 82 | struct dpu_hw_mixer *lm = mixer->hw_lm; |
83 | uint32_t blend_op; | ||
84 | struct drm_format_name_buf format_name; | ||
303 | 85 | ||
304 | /* default to opaque blending */ | 86 | /* default to opaque blending */ |
305 | lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0, | 87 | blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | |
306 | DPU_BLEND_FG_ALPHA_FG_CONST | | 88 | DPU_BLEND_BG_ALPHA_BG_CONST; |
307 | DPU_BLEND_BG_ALPHA_BG_CONST); | 89 | |
90 | if (format->alpha_enable) { | ||
91 | /* coverage blending */ | ||
92 | blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | | ||
93 | DPU_BLEND_BG_ALPHA_FG_PIXEL | | ||
94 | DPU_BLEND_BG_INV_ALPHA; | ||
95 | } | ||
96 | |||
97 | lm->ops.setup_blend_config(lm, pstate->stage, | ||
98 | 0xFF, 0, blend_op); | ||
99 | |||
100 | DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n", | ||
101 | drm_get_format_name(format->base.pixel_format, &format_name), | ||
102 | format->alpha_enable, blend_op); | ||
308 | } | 103 | } |
309 | 104 | ||
310 | static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) | 105 | static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) |
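The rewritten _dpu_crtc_setup_blend_cfg() above now picks the blend op from the framebuffer format: constant-alpha (opaque) blending by default, per-pixel coverage blending when the format carries an alpha channel. A sketch of that selection; the flag names mirror the DPU_BLEND_* values in the hunk, but the bit values here are placeholders rather than the real register encoding:

#include <stdio.h>

#define FG_ALPHA_FG_CONST  (1u << 0)
#define BG_ALPHA_BG_CONST  (1u << 1)
#define FG_ALPHA_FG_PIXEL  (1u << 2)
#define BG_ALPHA_FG_PIXEL  (1u << 3)
#define BG_INV_ALPHA       (1u << 4)

/* Coverage blending only when the format has per-pixel alpha. */
static unsigned int pick_blend_op(int alpha_enable)
{
	if (alpha_enable)
		return FG_ALPHA_FG_PIXEL | BG_ALPHA_FG_PIXEL | BG_INV_ALPHA;
	return FG_ALPHA_FG_CONST | BG_ALPHA_BG_CONST;
}

int main(void)
{
	printf("opaque: 0x%x, coverage: 0x%x\n",
	       pick_blend_op(0), pick_blend_op(1));
	return 0;
}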
@@ -317,9 +112,9 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) | |||
317 | crtc_state = to_dpu_crtc_state(crtc->state); | 112 | crtc_state = to_dpu_crtc_state(crtc->state); |
318 | 113 | ||
319 | lm_horiz_position = 0; | 114 | lm_horiz_position = 0; |
320 | for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) { | 115 | for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) { |
321 | const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; | 116 | const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; |
322 | struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm; | 117 | struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; |
323 | struct dpu_hw_mixer_cfg cfg; | 118 | struct dpu_hw_mixer_cfg cfg; |
324 | 119 | ||
325 | if (!lm_roi || !drm_rect_visible(lm_roi)) | 120 | if (!lm_roi || !drm_rect_visible(lm_roi)) |
@@ -339,28 +134,17 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, | |||
339 | struct drm_plane *plane; | 134 | struct drm_plane *plane; |
340 | struct drm_framebuffer *fb; | 135 | struct drm_framebuffer *fb; |
341 | struct drm_plane_state *state; | 136 | struct drm_plane_state *state; |
342 | struct dpu_crtc_state *cstate; | 137 | struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); |
343 | struct dpu_plane_state *pstate = NULL; | 138 | struct dpu_plane_state *pstate = NULL; |
344 | struct dpu_format *format; | 139 | struct dpu_format *format; |
345 | struct dpu_hw_ctl *ctl; | 140 | struct dpu_hw_ctl *ctl = mixer->lm_ctl; |
346 | struct dpu_hw_mixer *lm; | 141 | struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg; |
347 | struct dpu_hw_stage_cfg *stage_cfg; | ||
348 | 142 | ||
349 | u32 flush_mask; | 143 | u32 flush_mask; |
350 | uint32_t stage_idx, lm_idx; | 144 | uint32_t stage_idx, lm_idx; |
351 | int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; | 145 | int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; |
352 | bool bg_alpha_enable = false; | 146 | bool bg_alpha_enable = false; |
353 | 147 | ||
354 | if (!dpu_crtc || !mixer) { | ||
355 | DPU_ERROR("invalid dpu_crtc or mixer\n"); | ||
356 | return; | ||
357 | } | ||
358 | |||
359 | ctl = mixer->hw_ctl; | ||
360 | lm = mixer->hw_lm; | ||
361 | stage_cfg = &dpu_crtc->stage_cfg; | ||
362 | cstate = to_dpu_crtc_state(crtc->state); | ||
363 | |||
364 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 148 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
365 | state = plane->state; | 149 | state = plane->state; |
366 | if (!state) | 150 | if (!state) |
@@ -379,10 +163,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, | |||
379 | state->fb ? state->fb->base.id : -1); | 163 | state->fb ? state->fb->base.id : -1); |
380 | 164 | ||
381 | format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); | 165 | format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); |
382 | if (!format) { | ||
383 | DPU_ERROR("invalid format\n"); | ||
384 | return; | ||
385 | } | ||
386 | 166 | ||
387 | if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) | 167 | if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) |
388 | bg_alpha_enable = true; | 168 | bg_alpha_enable = true; |
@@ -400,8 +180,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, | |||
400 | fb ? fb->modifier : 0); | 180 | fb ? fb->modifier : 0); |
401 | 181 | ||
402 | /* blend config update */ | 182 | /* blend config update */ |
403 | for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) { | 183 | for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) { |
404 | _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate); | 184 | _dpu_crtc_setup_blend_cfg(mixer + lm_idx, |
185 | pstate, format); | ||
405 | 186 | ||
406 | mixer[lm_idx].flush_mask |= flush_mask; | 187 | mixer[lm_idx].flush_mask |= flush_mask; |
407 | 188 | ||
@@ -422,38 +203,25 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, | |||
422 | */ | 203 | */ |
423 | static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) | 204 | static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) |
424 | { | 205 | { |
425 | struct dpu_crtc *dpu_crtc; | 206 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
426 | struct dpu_crtc_state *dpu_crtc_state; | 207 | struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); |
427 | struct dpu_crtc_mixer *mixer; | 208 | struct dpu_crtc_mixer *mixer = cstate->mixers; |
428 | struct dpu_hw_ctl *ctl; | 209 | struct dpu_hw_ctl *ctl; |
429 | struct dpu_hw_mixer *lm; | 210 | struct dpu_hw_mixer *lm; |
430 | |||
431 | int i; | 211 | int i; |
432 | 212 | ||
433 | if (!crtc) | ||
434 | return; | ||
435 | |||
436 | dpu_crtc = to_dpu_crtc(crtc); | ||
437 | dpu_crtc_state = to_dpu_crtc_state(crtc->state); | ||
438 | mixer = dpu_crtc->mixers; | ||
439 | |||
440 | DPU_DEBUG("%s\n", dpu_crtc->name); | 213 | DPU_DEBUG("%s\n", dpu_crtc->name); |
441 | 214 | ||
442 | if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) { | 215 | for (i = 0; i < cstate->num_mixers; i++) { |
443 | DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers); | 216 | if (!mixer[i].hw_lm || !mixer[i].lm_ctl) { |
444 | return; | ||
445 | } | ||
446 | |||
447 | for (i = 0; i < dpu_crtc->num_mixers; i++) { | ||
448 | if (!mixer[i].hw_lm || !mixer[i].hw_ctl) { | ||
449 | DPU_ERROR("invalid lm or ctl assigned to mixer\n"); | 217 | DPU_ERROR("invalid lm or ctl assigned to mixer\n"); |
450 | return; | 218 | return; |
451 | } | 219 | } |
452 | mixer[i].mixer_op_mode = 0; | 220 | mixer[i].mixer_op_mode = 0; |
453 | mixer[i].flush_mask = 0; | 221 | mixer[i].flush_mask = 0; |
454 | if (mixer[i].hw_ctl->ops.clear_all_blendstages) | 222 | if (mixer[i].lm_ctl->ops.clear_all_blendstages) |
455 | mixer[i].hw_ctl->ops.clear_all_blendstages( | 223 | mixer[i].lm_ctl->ops.clear_all_blendstages( |
456 | mixer[i].hw_ctl); | 224 | mixer[i].lm_ctl); |
457 | } | 225 | } |
458 | 226 | ||
459 | /* initialize stage cfg */ | 227 | /* initialize stage cfg */ |
@@ -461,8 +229,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) | |||
461 | 229 | ||
462 | _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer); | 230 | _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer); |
463 | 231 | ||
464 | for (i = 0; i < dpu_crtc->num_mixers; i++) { | 232 | for (i = 0; i < cstate->num_mixers; i++) { |
465 | ctl = mixer[i].hw_ctl; | 233 | ctl = mixer[i].lm_ctl; |
466 | lm = mixer[i].hw_lm; | 234 | lm = mixer[i].hw_lm; |
467 | 235 | ||
468 | lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); | 236 | lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); |
@@ -543,34 +311,13 @@ static void dpu_crtc_vblank_cb(void *data) | |||
543 | 311 | ||
544 | static void dpu_crtc_frame_event_work(struct kthread_work *work) | 312 | static void dpu_crtc_frame_event_work(struct kthread_work *work) |
545 | { | 313 | { |
546 | struct msm_drm_private *priv; | 314 | struct dpu_crtc_frame_event *fevent = container_of(work, |
547 | struct dpu_crtc_frame_event *fevent; | 315 | struct dpu_crtc_frame_event, work); |
548 | struct drm_crtc *crtc; | 316 | struct drm_crtc *crtc = fevent->crtc; |
549 | struct dpu_crtc *dpu_crtc; | 317 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
550 | struct dpu_kms *dpu_kms; | ||
551 | unsigned long flags; | 318 | unsigned long flags; |
552 | bool frame_done = false; | 319 | bool frame_done = false; |
553 | 320 | ||
554 | if (!work) { | ||
555 | DPU_ERROR("invalid work handle\n"); | ||
556 | return; | ||
557 | } | ||
558 | |||
559 | fevent = container_of(work, struct dpu_crtc_frame_event, work); | ||
560 | if (!fevent->crtc || !fevent->crtc->state) { | ||
561 | DPU_ERROR("invalid crtc\n"); | ||
562 | return; | ||
563 | } | ||
564 | |||
565 | crtc = fevent->crtc; | ||
566 | dpu_crtc = to_dpu_crtc(crtc); | ||
567 | |||
568 | dpu_kms = _dpu_crtc_get_kms(crtc); | ||
569 | if (!dpu_kms) { | ||
570 | DPU_ERROR("invalid kms handle\n"); | ||
571 | return; | ||
572 | } | ||
573 | priv = dpu_kms->dev->dev_private; | ||
574 | DPU_ATRACE_BEGIN("crtc_frame_event"); | 321 | DPU_ATRACE_BEGIN("crtc_frame_event"); |
575 | 322 | ||
576 | DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, | 323 | DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, |
@@ -636,11 +383,6 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) | |||
636 | unsigned long flags; | 383 | unsigned long flags; |
637 | u32 crtc_id; | 384 | u32 crtc_id; |
638 | 385 | ||
639 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { | ||
640 | DPU_ERROR("invalid parameters\n"); | ||
641 | return; | ||
642 | } | ||
643 | |||
644 | /* Nothing to do on idle event */ | 386 | /* Nothing to do on idle event */ |
645 | if (event & DPU_ENCODER_FRAME_EVENT_IDLE) | 387 | if (event & DPU_ENCODER_FRAME_EVENT_IDLE) |
646 | return; | 388 | return; |
@@ -683,7 +425,7 @@ static void _dpu_crtc_setup_mixer_for_encoder( | |||
683 | struct drm_crtc *crtc, | 425 | struct drm_crtc *crtc, |
684 | struct drm_encoder *enc) | 426 | struct drm_encoder *enc) |
685 | { | 427 | { |
686 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); | 428 | struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); |
687 | struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); | 429 | struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); |
688 | struct dpu_rm *rm = &dpu_kms->rm; | 430 | struct dpu_rm *rm = &dpu_kms->rm; |
689 | struct dpu_crtc_mixer *mixer; | 431 | struct dpu_crtc_mixer *mixer; |
@@ -695,8 +437,8 @@ static void _dpu_crtc_setup_mixer_for_encoder( | |||
695 | dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL); | 437 | dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL); |
696 | 438 | ||
697 | /* Set up all the mixers and ctls reserved by this encoder */ | 439 | /* Set up all the mixers and ctls reserved by this encoder */ |
698 | for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) { | 440 | for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) { |
699 | mixer = &dpu_crtc->mixers[i]; | 441 | mixer = &cstate->mixers[i]; |
700 | 442 | ||
701 | if (!dpu_rm_get_hw(rm, &lm_iter)) | 443 | if (!dpu_rm_get_hw(rm, &lm_iter)) |
702 | break; | 444 | break; |
@@ -706,14 +448,14 @@ static void _dpu_crtc_setup_mixer_for_encoder( | |||
706 | if (!dpu_rm_get_hw(rm, &ctl_iter)) { | 448 | if (!dpu_rm_get_hw(rm, &ctl_iter)) { |
707 | DPU_DEBUG("no ctl assigned to lm %d, using previous\n", | 449 | DPU_DEBUG("no ctl assigned to lm %d, using previous\n", |
708 | mixer->hw_lm->idx - LM_0); | 450 | mixer->hw_lm->idx - LM_0); |
709 | mixer->hw_ctl = last_valid_ctl; | 451 | mixer->lm_ctl = last_valid_ctl; |
710 | } else { | 452 | } else { |
711 | mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw; | 453 | mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw; |
712 | last_valid_ctl = mixer->hw_ctl; | 454 | last_valid_ctl = mixer->lm_ctl; |
713 | } | 455 | } |
714 | 456 | ||
715 | /* Shouldn't happen, mixers are always >= ctls */ | 457 | /* Shouldn't happen, mixers are always >= ctls */ |
716 | if (!mixer->hw_ctl) { | 458 | if (!mixer->lm_ctl) { |
717 | DPU_ERROR("no valid ctls found for lm %d\n", | 459 | DPU_ERROR("no valid ctls found for lm %d\n", |
718 | mixer->hw_lm->idx - LM_0); | 460 | mixer->hw_lm->idx - LM_0); |
719 | return; | 461 | return; |
@@ -721,11 +463,11 @@ static void _dpu_crtc_setup_mixer_for_encoder( | |||
721 | 463 | ||
722 | mixer->encoder = enc; | 464 | mixer->encoder = enc; |
723 | 465 | ||
724 | dpu_crtc->num_mixers++; | 466 | cstate->num_mixers++; |
725 | DPU_DEBUG("setup mixer %d: lm %d\n", | 467 | DPU_DEBUG("setup mixer %d: lm %d\n", |
726 | i, mixer->hw_lm->idx - LM_0); | 468 | i, mixer->hw_lm->idx - LM_0); |
727 | DPU_DEBUG("setup mixer %d: ctl %d\n", | 469 | DPU_DEBUG("setup mixer %d: ctl %d\n", |
728 | i, mixer->hw_ctl->idx - CTL_0); | 470 | i, mixer->lm_ctl->idx - CTL_0); |
729 | } | 471 | } |
730 | } | 472 | } |
731 | 473 | ||
@@ -734,10 +476,6 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc) | |||
734 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); | 476 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
735 | struct drm_encoder *enc; | 477 | struct drm_encoder *enc; |
736 | 478 | ||
737 | dpu_crtc->num_mixers = 0; | ||
738 | dpu_crtc->mixers_swapped = false; | ||
739 | memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers)); | ||
740 | |||
741 | mutex_lock(&dpu_crtc->crtc_lock); | 479 | mutex_lock(&dpu_crtc->crtc_lock); |
742 | /* Check for mixers on all encoders attached to this crtc */ | 480 | /* Check for mixers on all encoders attached to this crtc */ |
743 | list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { | 481 | list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { |
@@ -753,24 +491,13 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc) | |||
753 | static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, | 491 | static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, |
754 | struct drm_crtc_state *state) | 492 | struct drm_crtc_state *state) |
755 | { | 493 | { |
756 | struct dpu_crtc *dpu_crtc; | 494 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
757 | struct dpu_crtc_state *cstate; | 495 | struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); |
758 | struct drm_display_mode *adj_mode; | 496 | struct drm_display_mode *adj_mode = &state->adjusted_mode; |
759 | u32 crtc_split_width; | 497 | u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode); |
760 | int i; | 498 | int i; |
761 | 499 | ||
762 | if (!crtc || !state) { | 500 | for (i = 0; i < cstate->num_mixers; i++) { |
763 | DPU_ERROR("invalid args\n"); | ||
764 | return; | ||
765 | } | ||
766 | |||
767 | dpu_crtc = to_dpu_crtc(crtc); | ||
768 | cstate = to_dpu_crtc_state(state); | ||
769 | |||
770 | adj_mode = &state->adjusted_mode; | ||
771 | crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode); | ||
772 | |||
773 | for (i = 0; i < dpu_crtc->num_mixers; i++) { | ||
774 | struct drm_rect *r = &cstate->lm_bounds[i]; | 501 | struct drm_rect *r = &cstate->lm_bounds[i]; |
775 | r->x1 = crtc_split_width * i; | 502 | r->x1 = crtc_split_width * i; |
776 | r->y1 = 0; | 503 | r->y1 = 0; |
@@ -787,6 +514,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, | |||
787 | struct drm_crtc_state *old_state) | 514 | struct drm_crtc_state *old_state) |
788 | { | 515 | { |
789 | struct dpu_crtc *dpu_crtc; | 516 | struct dpu_crtc *dpu_crtc; |
517 | struct dpu_crtc_state *cstate; | ||
790 | struct drm_encoder *encoder; | 518 | struct drm_encoder *encoder; |
791 | struct drm_device *dev; | 519 | struct drm_device *dev; |
792 | unsigned long flags; | 520 | unsigned long flags; |
@@ -806,10 +534,11 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, | |||
806 | DPU_DEBUG("crtc%d\n", crtc->base.id); | 534 | DPU_DEBUG("crtc%d\n", crtc->base.id); |
807 | 535 | ||
808 | dpu_crtc = to_dpu_crtc(crtc); | 536 | dpu_crtc = to_dpu_crtc(crtc); |
537 | cstate = to_dpu_crtc_state(crtc->state); | ||
809 | dev = crtc->dev; | 538 | dev = crtc->dev; |
810 | smmu_state = &dpu_crtc->smmu_state; | 539 | smmu_state = &dpu_crtc->smmu_state; |
811 | 540 | ||
812 | if (!dpu_crtc->num_mixers) { | 541 | if (!cstate->num_mixers) { |
813 | _dpu_crtc_setup_mixers(crtc); | 542 | _dpu_crtc_setup_mixers(crtc); |
814 | _dpu_crtc_setup_lm_bounds(crtc, crtc->state); | 543 | _dpu_crtc_setup_lm_bounds(crtc, crtc->state); |
815 | } | 544 | } |
@@ -836,7 +565,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, | |||
836 | * it means we are trying to flush a CRTC whose state is disabled: | 565 | * it means we are trying to flush a CRTC whose state is disabled: |
837 | * nothing else needs to be done. | 566 | * nothing else needs to be done. |
838 | */ | 567 | */ |
839 | if (unlikely(!dpu_crtc->num_mixers)) | 568 | if (unlikely(!cstate->num_mixers)) |
840 | return; | 569 | return; |
841 | 570 | ||
842 | _dpu_crtc_blend_setup(crtc); | 571 | _dpu_crtc_blend_setup(crtc); |
@@ -861,11 +590,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, | |||
861 | unsigned long flags; | 590 | unsigned long flags; |
862 | struct dpu_crtc_state *cstate; | 591 | struct dpu_crtc_state *cstate; |
863 | 592 | ||
864 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { | ||
865 | DPU_ERROR("invalid crtc\n"); | ||
866 | return; | ||
867 | } | ||
868 | |||
869 | if (!crtc->state->enable) { | 593 | if (!crtc->state->enable) { |
870 | DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n", | 594 | DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n", |
871 | crtc->base.id, crtc->state->enable); | 595 | crtc->base.id, crtc->state->enable); |
@@ -900,7 +624,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, | |||
900 | * it means we are trying to flush a CRTC whose state is disabled: | 624 | * it means we are trying to flush a CRTC whose state is disabled: |
901 | * nothing else needs to be done. | 625 | * nothing else needs to be done. |
902 | */ | 626 | */ |
903 | if (unlikely(!dpu_crtc->num_mixers)) | 627 | if (unlikely(!cstate->num_mixers)) |
904 | return; | 628 | return; |
905 | 629 | ||
906 | /* | 630 | /* |
@@ -951,8 +675,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc, | |||
951 | 675 | ||
952 | DPU_DEBUG("crtc%d\n", crtc->base.id); | 676 | DPU_DEBUG("crtc%d\n", crtc->base.id); |
953 | 677 | ||
954 | _dpu_crtc_rp_destroy(&cstate->rp); | ||
955 | |||
956 | __drm_atomic_helper_crtc_destroy_state(state); | 678 | __drm_atomic_helper_crtc_destroy_state(state); |
957 | 679 | ||
958 | kfree(cstate); | 680 | kfree(cstate); |
@@ -960,15 +682,9 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc, | |||
960 | 682 | ||
961 | static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) | 683 | static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) |
962 | { | 684 | { |
963 | struct dpu_crtc *dpu_crtc; | 685 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
964 | int ret, rc = 0; | 686 | int ret, rc = 0; |
965 | 687 | ||
966 | if (!crtc) { | ||
967 | DPU_ERROR("invalid argument\n"); | ||
968 | return -EINVAL; | ||
969 | } | ||
970 | dpu_crtc = to_dpu_crtc(crtc); | ||
971 | |||
972 | if (!atomic_read(&dpu_crtc->frame_pending)) { | 688 | if (!atomic_read(&dpu_crtc->frame_pending)) { |
973 | DPU_DEBUG("no frames pending\n"); | 689 | DPU_DEBUG("no frames pending\n"); |
974 | return 0; | 690 | return 0; |
@@ -989,35 +705,18 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) | |||
989 | void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) | 705 | void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) |
990 | { | 706 | { |
991 | struct drm_encoder *encoder; | 707 | struct drm_encoder *encoder; |
992 | struct drm_device *dev; | 708 | struct drm_device *dev = crtc->dev; |
993 | struct dpu_crtc *dpu_crtc; | 709 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
994 | struct msm_drm_private *priv; | 710 | struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); |
995 | struct dpu_kms *dpu_kms; | 711 | struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); |
996 | struct dpu_crtc_state *cstate; | ||
997 | int ret; | 712 | int ret; |
998 | 713 | ||
999 | if (!crtc) { | ||
1000 | DPU_ERROR("invalid argument\n"); | ||
1001 | return; | ||
1002 | } | ||
1003 | dev = crtc->dev; | ||
1004 | dpu_crtc = to_dpu_crtc(crtc); | ||
1005 | dpu_kms = _dpu_crtc_get_kms(crtc); | ||
1006 | |||
1007 | if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) { | ||
1008 | DPU_ERROR("invalid argument\n"); | ||
1009 | return; | ||
1010 | } | ||
1011 | |||
1012 | priv = dpu_kms->dev->dev_private; | ||
1013 | cstate = to_dpu_crtc_state(crtc->state); | ||
1014 | |||
1015 | /* | 714 | /* |
1016 | * If no mixers have been allocated in dpu_crtc_atomic_check(), | 715 | * If no mixers have been allocated in dpu_crtc_atomic_check(), |
1017 | * it means we are trying to start a CRTC whose state is disabled: | 716 | * it means we are trying to start a CRTC whose state is disabled: |
1018 | * nothing else needs to be done. | 717 | * nothing else needs to be done. |
1019 | */ | 718 | */ |
1020 | if (unlikely(!dpu_crtc->num_mixers)) | 719 | if (unlikely(!cstate->num_mixers)) |
1021 | return; | 720 | return; |
1022 | 721 | ||
1023 | DPU_ATRACE_BEGIN("crtc_commit"); | 722 | DPU_ATRACE_BEGIN("crtc_commit"); |
@@ -1072,33 +771,19 @@ end: | |||
1072 | * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request | 771 | * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request |
1073 | * @dpu_crtc: Pointer to dpu crtc structure | 772 | * @dpu_crtc: Pointer to dpu crtc structure |
1074 | * @enable: Whether to enable/disable vblanks | 773 | * @enable: Whether to enable/disable vblanks |
1075 | * | ||
1076 | * @Return: error code | ||
1077 | */ | 774 | */ |
1078 | static int _dpu_crtc_vblank_enable_no_lock( | 775 | static void _dpu_crtc_vblank_enable_no_lock( |
1079 | struct dpu_crtc *dpu_crtc, bool enable) | 776 | struct dpu_crtc *dpu_crtc, bool enable) |
1080 | { | 777 | { |
1081 | struct drm_device *dev; | 778 | struct drm_crtc *crtc = &dpu_crtc->base; |
1082 | struct drm_crtc *crtc; | 779 | struct drm_device *dev = crtc->dev; |
1083 | struct drm_encoder *enc; | 780 | struct drm_encoder *enc; |
1084 | 781 | ||
1085 | if (!dpu_crtc) { | ||
1086 | DPU_ERROR("invalid crtc\n"); | ||
1087 | return -EINVAL; | ||
1088 | } | ||
1089 | |||
1090 | crtc = &dpu_crtc->base; | ||
1091 | dev = crtc->dev; | ||
1092 | |||
1093 | if (enable) { | 782 | if (enable) { |
1094 | int ret; | ||
1095 | |||
1096 | /* drop lock since power crtc cb may try to re-acquire lock */ | 783 | /* drop lock since power crtc cb may try to re-acquire lock */ |
1097 | mutex_unlock(&dpu_crtc->crtc_lock); | 784 | mutex_unlock(&dpu_crtc->crtc_lock); |
1098 | ret = _dpu_crtc_power_enable(dpu_crtc, true); | 785 | pm_runtime_get_sync(dev->dev); |
1099 | mutex_lock(&dpu_crtc->crtc_lock); | 786 | mutex_lock(&dpu_crtc->crtc_lock); |
1100 | if (ret) | ||
1101 | return ret; | ||
1102 | 787 | ||
1103 | list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { | 788 | list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { |
1104 | if (enc->crtc != crtc) | 789 | if (enc->crtc != crtc) |
@@ -1125,11 +810,9 @@ static int _dpu_crtc_vblank_enable_no_lock( | |||
1125 | 810 | ||
1126 | /* drop lock since power crtc cb may try to re-acquire lock */ | 811 | /* drop lock since power crtc cb may try to re-acquire lock */ |
1127 | mutex_unlock(&dpu_crtc->crtc_lock); | 812 | mutex_unlock(&dpu_crtc->crtc_lock); |
1128 | _dpu_crtc_power_enable(dpu_crtc, false); | 813 | pm_runtime_put_sync(dev->dev); |
1129 | mutex_lock(&dpu_crtc->crtc_lock); | 814 | mutex_lock(&dpu_crtc->crtc_lock); |
1130 | } | 815 | } |
1131 | |||
1132 | return 0; | ||
1133 | } | 816 | } |
1134 | 817 | ||
1135 | /** | 818 | /** |
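_dpu_crtc_vblank_enable_no_lock() above now takes and drops a runtime-PM reference instead of calling the driver's own _dpu_crtc_power_enable() helper, and having no error to report is what lets it become void. A hedged sketch of the pm_runtime_get_sync()/pm_runtime_put_sync() pairing; the helper name and the placeholder register access are illustrative, not code from this patch.

#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* powers the device up if needed */
	if (ret < 0) {
		pm_runtime_put_sync(dev);	/* drop the reference taken above */
		return ret;
	}

	/* ... program the hardware here ... */

	pm_runtime_put_sync(dev);		/* balance the get */
	return 0;
}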
@@ -1139,23 +822,7 @@ static int _dpu_crtc_vblank_enable_no_lock( | |||
1139 | */ | 822 | */ |
1140 | static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable) | 823 | static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable) |
1141 | { | 824 | { |
1142 | struct dpu_crtc *dpu_crtc; | 825 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
1143 | struct msm_drm_private *priv; | ||
1144 | struct dpu_kms *dpu_kms; | ||
1145 | int ret = 0; | ||
1146 | |||
1147 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { | ||
1148 | DPU_ERROR("invalid crtc\n"); | ||
1149 | return; | ||
1150 | } | ||
1151 | dpu_crtc = to_dpu_crtc(crtc); | ||
1152 | priv = crtc->dev->dev_private; | ||
1153 | |||
1154 | if (!priv->kms) { | ||
1155 | DPU_ERROR("invalid crtc kms\n"); | ||
1156 | return; | ||
1157 | } | ||
1158 | dpu_kms = to_dpu_kms(priv->kms); | ||
1159 | 826 | ||
1160 | DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable); | 827 | DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable); |
1161 | 828 | ||
@@ -1170,10 +837,7 @@ static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable) | |||
1170 | DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n", | 837 | DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n", |
1171 | crtc->base.id, enable); | 838 | crtc->base.id, enable); |
1172 | else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) { | 839 | else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) { |
1173 | ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable); | 840 | _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable); |
1174 | if (ret) | ||
1175 | DPU_ERROR("%s vblank enable failed: %d\n", | ||
1176 | dpu_crtc->name, ret); | ||
1177 | } | 841 | } |
1178 | 842 | ||
1179 | dpu_crtc->suspend = enable; | 843 | dpu_crtc->suspend = enable; |
@@ -1206,8 +870,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) | |||
1206 | /* duplicate base helper */ | 870 | /* duplicate base helper */ |
1207 | __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); | 871 | __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); |
1208 | 872 | ||
1209 | _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp); | ||
1210 | |||
1211 | return &cstate->base; | 873 | return &cstate->base; |
1212 | } | 874 | } |
1213 | 875 | ||
@@ -1244,9 +906,6 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) | |||
1244 | return; | 906 | return; |
1245 | } | 907 | } |
1246 | 908 | ||
1247 | _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock, | ||
1248 | &dpu_crtc->rp_head); | ||
1249 | |||
1250 | cstate->base.crtc = crtc; | 909 | cstate->base.crtc = crtc; |
1251 | crtc->state = &cstate->base; | 910 | crtc->state = &cstate->base; |
1252 | } | 911 | } |
@@ -1254,62 +913,19 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) | |||
1254 | static void dpu_crtc_handle_power_event(u32 event_type, void *arg) | 913 | static void dpu_crtc_handle_power_event(u32 event_type, void *arg) |
1255 | { | 914 | { |
1256 | struct drm_crtc *crtc = arg; | 915 | struct drm_crtc *crtc = arg; |
1257 | struct dpu_crtc *dpu_crtc; | 916 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
1258 | struct drm_encoder *encoder; | 917 | struct drm_encoder *encoder; |
1259 | struct dpu_crtc_mixer *m; | ||
1260 | u32 i, misr_status; | ||
1261 | |||
1262 | if (!crtc) { | ||
1263 | DPU_ERROR("invalid crtc\n"); | ||
1264 | return; | ||
1265 | } | ||
1266 | dpu_crtc = to_dpu_crtc(crtc); | ||
1267 | 918 | ||
1268 | mutex_lock(&dpu_crtc->crtc_lock); | 919 | mutex_lock(&dpu_crtc->crtc_lock); |
1269 | 920 | ||
1270 | trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type); | 921 | trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type); |
1271 | 922 | ||
1272 | switch (event_type) { | 923 | /* restore encoder; crtc will be programmed during commit */ |
1273 | case DPU_POWER_EVENT_POST_ENABLE: | 924 | drm_for_each_encoder(encoder, crtc->dev) { |
1274 | /* restore encoder; crtc will be programmed during commit */ | 925 | if (encoder->crtc != crtc) |
1275 | drm_for_each_encoder(encoder, crtc->dev) { | 926 | continue; |
1276 | if (encoder->crtc != crtc) | ||
1277 | continue; | ||
1278 | |||
1279 | dpu_encoder_virt_restore(encoder); | ||
1280 | } | ||
1281 | |||
1282 | for (i = 0; i < dpu_crtc->num_mixers; ++i) { | ||
1283 | m = &dpu_crtc->mixers[i]; | ||
1284 | if (!m->hw_lm || !m->hw_lm->ops.setup_misr || | ||
1285 | !dpu_crtc->misr_enable) | ||
1286 | continue; | ||
1287 | |||
1288 | m->hw_lm->ops.setup_misr(m->hw_lm, true, | ||
1289 | dpu_crtc->misr_frame_count); | ||
1290 | } | ||
1291 | break; | ||
1292 | case DPU_POWER_EVENT_PRE_DISABLE: | ||
1293 | for (i = 0; i < dpu_crtc->num_mixers; ++i) { | ||
1294 | m = &dpu_crtc->mixers[i]; | ||
1295 | if (!m->hw_lm || !m->hw_lm->ops.collect_misr || | ||
1296 | !dpu_crtc->misr_enable) | ||
1297 | continue; | ||
1298 | 927 | ||
1299 | misr_status = m->hw_lm->ops.collect_misr(m->hw_lm); | 928 | dpu_encoder_virt_restore(encoder); |
1300 | dpu_crtc->misr_data[i] = misr_status ? misr_status : | ||
1301 | dpu_crtc->misr_data[i]; | ||
1302 | } | ||
1303 | break; | ||
1304 | case DPU_POWER_EVENT_POST_DISABLE: | ||
1305 | /** | ||
1306 | * Nothing to do. All the planes on the CRTC will be | ||
1307 | * programmed for every frame | ||
1308 | */ | ||
1309 | break; | ||
1310 | default: | ||
1311 | DPU_DEBUG("event:%d not handled\n", event_type); | ||
1312 | break; | ||
1313 | } | 929 | } |
1314 | 930 | ||
1315 | mutex_unlock(&dpu_crtc->crtc_lock); | 931 | mutex_unlock(&dpu_crtc->crtc_lock); |
@@ -1322,7 +938,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc) | |||
1322 | struct drm_display_mode *mode; | 938 | struct drm_display_mode *mode; |
1323 | struct drm_encoder *encoder; | 939 | struct drm_encoder *encoder; |
1324 | struct msm_drm_private *priv; | 940 | struct msm_drm_private *priv; |
1325 | int ret; | ||
1326 | unsigned long flags; | 941 | unsigned long flags; |
1327 | 942 | ||
1328 | if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { | 943 | if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { |
@@ -1353,10 +968,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc) | |||
1353 | trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); | 968 | trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); |
1354 | if (dpu_crtc->enabled && !dpu_crtc->suspend && | 969 | if (dpu_crtc->enabled && !dpu_crtc->suspend && |
1355 | dpu_crtc->vblank_requested) { | 970 | dpu_crtc->vblank_requested) { |
1356 | ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false); | 971 | _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false); |
1357 | if (ret) | ||
1358 | DPU_ERROR("%s vblank enable failed: %d\n", | ||
1359 | dpu_crtc->name, ret); | ||
1360 | } | 972 | } |
1361 | dpu_crtc->enabled = false; | 973 | dpu_crtc->enabled = false; |
1362 | 974 | ||
@@ -1379,9 +991,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc) | |||
1379 | dpu_power_handle_unregister_event(dpu_crtc->phandle, | 991 | dpu_power_handle_unregister_event(dpu_crtc->phandle, |
1380 | dpu_crtc->power_event); | 992 | dpu_crtc->power_event); |
1381 | 993 | ||
1382 | memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers)); | 994 | memset(cstate->mixers, 0, sizeof(cstate->mixers)); |
1383 | dpu_crtc->num_mixers = 0; | 995 | cstate->num_mixers = 0; |
1384 | dpu_crtc->mixers_swapped = false; | ||
1385 | 996 | ||
1386 | /* disable clk & bw control until clk & bw properties are set */ | 997 | /* disable clk & bw control until clk & bw properties are set */ |
1387 | cstate->bw_control = false; | 998 | cstate->bw_control = false; |
@@ -1403,7 +1014,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, | |||
1403 | struct dpu_crtc *dpu_crtc; | 1014 | struct dpu_crtc *dpu_crtc; |
1404 | struct drm_encoder *encoder; | 1015 | struct drm_encoder *encoder; |
1405 | struct msm_drm_private *priv; | 1016 | struct msm_drm_private *priv; |
1406 | int ret; | ||
1407 | 1017 | ||
1408 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { | 1018 | if (!crtc || !crtc->dev || !crtc->dev->dev_private) { |
1409 | DPU_ERROR("invalid crtc\n"); | 1019 | DPU_ERROR("invalid crtc\n"); |
@@ -1425,10 +1035,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, | |||
1425 | trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); | 1035 | trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); |
1426 | if (!dpu_crtc->enabled && !dpu_crtc->suspend && | 1036 | if (!dpu_crtc->enabled && !dpu_crtc->suspend && |
1427 | dpu_crtc->vblank_requested) { | 1037 | dpu_crtc->vblank_requested) { |
1428 | ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true); | 1038 | _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true); |
1429 | if (ret) | ||
1430 | DPU_ERROR("%s vblank enable failed: %d\n", | ||
1431 | dpu_crtc->name, ret); | ||
1432 | } | 1039 | } |
1433 | dpu_crtc->enabled = true; | 1040 | dpu_crtc->enabled = true; |
1434 | 1041 | ||
@@ -1438,9 +1045,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, | |||
1438 | drm_crtc_vblank_on(crtc); | 1045 | drm_crtc_vblank_on(crtc); |
1439 | 1046 | ||
1440 | dpu_crtc->power_event = dpu_power_handle_register_event( | 1047 | dpu_crtc->power_event = dpu_power_handle_register_event( |
1441 | dpu_crtc->phandle, | 1048 | dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE, |
1442 | DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE | | ||
1443 | DPU_POWER_EVENT_PRE_DISABLE, | ||
1444 | dpu_crtc_handle_power_event, crtc, dpu_crtc->name); | 1049 | dpu_crtc_handle_power_event, crtc, dpu_crtc->name); |
1445 | 1050 | ||
1446 | } | 1051 | } |
@@ -1496,7 +1101,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, | |||
1496 | 1101 | ||
1497 | memset(pipe_staged, 0, sizeof(pipe_staged)); | 1102 | memset(pipe_staged, 0, sizeof(pipe_staged)); |
1498 | 1103 | ||
1499 | mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode); | 1104 | mixer_width = _dpu_crtc_get_mixer_width(cstate, mode); |
1500 | 1105 | ||
1501 | _dpu_crtc_setup_lm_bounds(crtc, state); | 1106 | _dpu_crtc_setup_lm_bounds(crtc, state); |
1502 | 1107 | ||
@@ -1535,8 +1140,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, | |||
1535 | cnt++; | 1140 | cnt++; |
1536 | 1141 | ||
1537 | dst = drm_plane_state_dest(pstate); | 1142 | dst = drm_plane_state_dest(pstate); |
1538 | if (!drm_rect_intersect(&clip, &dst) || | 1143 | if (!drm_rect_intersect(&clip, &dst)) { |
1539 | !drm_rect_equals(&clip, &dst)) { | ||
1540 | DPU_ERROR("invalid vertical/horizontal destination\n"); | 1144 | DPU_ERROR("invalid vertical/horizontal destination\n"); |
1541 | DPU_ERROR("display: " DRM_RECT_FMT " plane: " | 1145 | DPU_ERROR("display: " DRM_RECT_FMT " plane: " |
1542 | DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), | 1146 | DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), |
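The atomic_check change above relaxes the destination test: with drm_rect_equals() gone, a plane only has to overlap the CRTC rectangle rather than fit entirely inside it. A small stand-alone C illustration with simplified stand-ins for the drm_rect helpers:

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Clip r against clip and report whether anything is left, roughly what
 * drm_rect_intersect() does. */
static bool rect_intersect(struct rect *r, const struct rect *clip)
{
	if (r->x1 < clip->x1) r->x1 = clip->x1;
	if (r->y1 < clip->y1) r->y1 = clip->y1;
	if (r->x2 > clip->x2) r->x2 = clip->x2;
	if (r->y2 > clip->y2) r->y2 = clip->y2;
	return r->x1 < r->x2 && r->y1 < r->y2;
}

int main(void)
{
	struct rect crtc = { 0, 0, 1920, 1080 };
	struct rect dst  = { 1800, 900, 2100, 1200 };	/* hangs off the edge */

	/* The old intersect-and-equals check rejected this layout; the new
	 * intersect-only check clips it and accepts the partial overlap. */
	printf("visible: %s\n", rect_intersect(&dst, &crtc) ? "yes" : "no");
	return 0;
}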
@@ -1679,7 +1283,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, | |||
1679 | } | 1283 | } |
1680 | 1284 | ||
1681 | end: | 1285 | end: |
1682 | _dpu_crtc_rp_free_unused(&cstate->rp); | ||
1683 | kfree(pstates); | 1286 | kfree(pstates); |
1684 | return rc; | 1287 | return rc; |
1685 | } | 1288 | } |
@@ -1687,7 +1290,6 @@ end: | |||
1687 | int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) | 1290 | int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) |
1688 | { | 1291 | { |
1689 | struct dpu_crtc *dpu_crtc; | 1292 | struct dpu_crtc *dpu_crtc; |
1690 | int ret; | ||
1691 | 1293 | ||
1692 | if (!crtc) { | 1294 | if (!crtc) { |
1693 | DPU_ERROR("invalid crtc\n"); | 1295 | DPU_ERROR("invalid crtc\n"); |
@@ -1698,10 +1300,7 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) | |||
1698 | mutex_lock(&dpu_crtc->crtc_lock); | 1300 | mutex_lock(&dpu_crtc->crtc_lock); |
1699 | trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); | 1301 | trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); |
1700 | if (dpu_crtc->enabled && !dpu_crtc->suspend) { | 1302 | if (dpu_crtc->enabled && !dpu_crtc->suspend) { |
1701 | ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en); | 1303 | _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en); |
1702 | if (ret) | ||
1703 | DPU_ERROR("%s vblank enable failed: %d\n", | ||
1704 | dpu_crtc->name, ret); | ||
1705 | } | 1304 | } |
1706 | dpu_crtc->vblank_requested = en; | 1305 | dpu_crtc->vblank_requested = en; |
1707 | mutex_unlock(&dpu_crtc->crtc_lock); | 1306 | mutex_unlock(&dpu_crtc->crtc_lock); |
@@ -1730,26 +1329,28 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) | |||
1730 | 1329 | ||
1731 | dpu_crtc = s->private; | 1330 | dpu_crtc = s->private; |
1732 | crtc = &dpu_crtc->base; | 1331 | crtc = &dpu_crtc->base; |
1332 | |||
1333 | drm_modeset_lock_all(crtc->dev); | ||
1733 | cstate = to_dpu_crtc_state(crtc->state); | 1334 | cstate = to_dpu_crtc_state(crtc->state); |
1734 | 1335 | ||
1735 | mutex_lock(&dpu_crtc->crtc_lock); | 1336 | mutex_lock(&dpu_crtc->crtc_lock); |
1736 | mode = &crtc->state->adjusted_mode; | 1337 | mode = &crtc->state->adjusted_mode; |
1737 | out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode); | 1338 | out_width = _dpu_crtc_get_mixer_width(cstate, mode); |
1738 | 1339 | ||
1739 | seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, | 1340 | seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, |
1740 | mode->hdisplay, mode->vdisplay); | 1341 | mode->hdisplay, mode->vdisplay); |
1741 | 1342 | ||
1742 | seq_puts(s, "\n"); | 1343 | seq_puts(s, "\n"); |
1743 | 1344 | ||
1744 | for (i = 0; i < dpu_crtc->num_mixers; ++i) { | 1345 | for (i = 0; i < cstate->num_mixers; ++i) { |
1745 | m = &dpu_crtc->mixers[i]; | 1346 | m = &cstate->mixers[i]; |
1746 | if (!m->hw_lm) | 1347 | if (!m->hw_lm) |
1747 | seq_printf(s, "\tmixer[%d] has no lm\n", i); | 1348 | seq_printf(s, "\tmixer[%d] has no lm\n", i); |
1748 | else if (!m->hw_ctl) | 1349 | else if (!m->lm_ctl) |
1749 | seq_printf(s, "\tmixer[%d] has no ctl\n", i); | 1350 | seq_printf(s, "\tmixer[%d] has no ctl\n", i); |
1750 | else | 1351 | else |
1751 | seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", | 1352 | seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", |
1752 | m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0, | 1353 | m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, |
1753 | out_width, mode->vdisplay); | 1354 | out_width, mode->vdisplay); |
1754 | } | 1355 | } |
1755 | 1356 | ||
@@ -1822,6 +1423,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) | |||
1822 | seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested); | 1423 | seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested); |
1823 | 1424 | ||
1824 | mutex_unlock(&dpu_crtc->crtc_lock); | 1425 | mutex_unlock(&dpu_crtc->crtc_lock); |
1426 | drm_modeset_unlock_all(crtc->dev); | ||
1825 | 1427 | ||
1826 | return 0; | 1428 | return 0; |
1827 | } | 1429 | } |
@@ -1831,113 +1433,6 @@ static int _dpu_debugfs_status_open(struct inode *inode, struct file *file) | |||
1831 | return single_open(file, _dpu_debugfs_status_show, inode->i_private); | 1433 | return single_open(file, _dpu_debugfs_status_show, inode->i_private); |
1832 | } | 1434 | } |
1833 | 1435 | ||
1834 | static ssize_t _dpu_crtc_misr_setup(struct file *file, | ||
1835 | const char __user *user_buf, size_t count, loff_t *ppos) | ||
1836 | { | ||
1837 | struct dpu_crtc *dpu_crtc; | ||
1838 | struct dpu_crtc_mixer *m; | ||
1839 | int i = 0, rc; | ||
1840 | char buf[MISR_BUFF_SIZE + 1]; | ||
1841 | u32 frame_count, enable; | ||
1842 | size_t buff_copy; | ||
1843 | |||
1844 | if (!file || !file->private_data) | ||
1845 | return -EINVAL; | ||
1846 | |||
1847 | dpu_crtc = file->private_data; | ||
1848 | buff_copy = min_t(size_t, count, MISR_BUFF_SIZE); | ||
1849 | if (copy_from_user(buf, user_buf, buff_copy)) { | ||
1850 | DPU_ERROR("buffer copy failed\n"); | ||
1851 | return -EINVAL; | ||
1852 | } | ||
1853 | |||
1854 | buf[buff_copy] = 0; /* end of string */ | ||
1855 | |||
1856 | if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) | ||
1857 | return -EINVAL; | ||
1858 | |||
1859 | rc = _dpu_crtc_power_enable(dpu_crtc, true); | ||
1860 | if (rc) | ||
1861 | return rc; | ||
1862 | |||
1863 | mutex_lock(&dpu_crtc->crtc_lock); | ||
1864 | dpu_crtc->misr_enable = enable; | ||
1865 | dpu_crtc->misr_frame_count = frame_count; | ||
1866 | for (i = 0; i < dpu_crtc->num_mixers; ++i) { | ||
1867 | dpu_crtc->misr_data[i] = 0; | ||
1868 | m = &dpu_crtc->mixers[i]; | ||
1869 | if (!m->hw_lm || !m->hw_lm->ops.setup_misr) | ||
1870 | continue; | ||
1871 | |||
1872 | m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count); | ||
1873 | } | ||
1874 | mutex_unlock(&dpu_crtc->crtc_lock); | ||
1875 | _dpu_crtc_power_enable(dpu_crtc, false); | ||
1876 | |||
1877 | return count; | ||
1878 | } | ||
1879 | |||
1880 | static ssize_t _dpu_crtc_misr_read(struct file *file, | ||
1881 | char __user *user_buff, size_t count, loff_t *ppos) | ||
1882 | { | ||
1883 | struct dpu_crtc *dpu_crtc; | ||
1884 | struct dpu_crtc_mixer *m; | ||
1885 | int i = 0, rc; | ||
1886 | u32 misr_status; | ||
1887 | ssize_t len = 0; | ||
1888 | char buf[MISR_BUFF_SIZE + 1] = {'\0'}; | ||
1889 | |||
1890 | if (*ppos) | ||
1891 | return 0; | ||
1892 | |||
1893 | if (!file || !file->private_data) | ||
1894 | return -EINVAL; | ||
1895 | |||
1896 | dpu_crtc = file->private_data; | ||
1897 | rc = _dpu_crtc_power_enable(dpu_crtc, true); | ||
1898 | if (rc) | ||
1899 | return rc; | ||
1900 | |||
1901 | mutex_lock(&dpu_crtc->crtc_lock); | ||
1902 | if (!dpu_crtc->misr_enable) { | ||
1903 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, | ||
1904 | "disabled\n"); | ||
1905 | goto buff_check; | ||
1906 | } | ||
1907 | |||
1908 | for (i = 0; i < dpu_crtc->num_mixers; ++i) { | ||
1909 | m = &dpu_crtc->mixers[i]; | ||
1910 | if (!m->hw_lm || !m->hw_lm->ops.collect_misr) | ||
1911 | continue; | ||
1912 | |||
1913 | misr_status = m->hw_lm->ops.collect_misr(m->hw_lm); | ||
1914 | dpu_crtc->misr_data[i] = misr_status ? misr_status : | ||
1915 | dpu_crtc->misr_data[i]; | ||
1916 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n", | ||
1917 | m->hw_lm->idx - LM_0); | ||
1918 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n", | ||
1919 | dpu_crtc->misr_data[i]); | ||
1920 | } | ||
1921 | |||
1922 | buff_check: | ||
1923 | if (count <= len) { | ||
1924 | len = 0; | ||
1925 | goto end; | ||
1926 | } | ||
1927 | |||
1928 | if (copy_to_user(user_buff, buf, len)) { | ||
1929 | len = -EFAULT; | ||
1930 | goto end; | ||
1931 | } | ||
1932 | |||
1933 | *ppos += len; /* increase offset */ | ||
1934 | |||
1935 | end: | ||
1936 | mutex_unlock(&dpu_crtc->crtc_lock); | ||
1937 | _dpu_crtc_power_enable(dpu_crtc, false); | ||
1938 | return len; | ||
1939 | } | ||
1940 | |||
1941 | #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ | 1436 | #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ |
1942 | static int __prefix ## _open(struct inode *inode, struct file *file) \ | 1437 | static int __prefix ## _open(struct inode *inode, struct file *file) \ |
1943 | { \ | 1438 | { \ |
@@ -1955,8 +1450,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) | |||
1955 | { | 1450 | { |
1956 | struct drm_crtc *crtc = (struct drm_crtc *) s->private; | 1451 | struct drm_crtc *crtc = (struct drm_crtc *) s->private; |
1957 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); | 1452 | struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); |
1958 | struct dpu_crtc_res *res; | ||
1959 | struct dpu_crtc_respool *rp; | ||
1960 | int i; | 1453 | int i; |
1961 | 1454 | ||
1962 | seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); | 1455 | seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); |
@@ -1973,17 +1466,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) | |||
1973 | dpu_crtc->cur_perf.max_per_pipe_ib[i]); | 1466 | dpu_crtc->cur_perf.max_per_pipe_ib[i]); |
1974 | } | 1467 | } |
1975 | 1468 | ||
1976 | mutex_lock(&dpu_crtc->rp_lock); | ||
1977 | list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) { | ||
1978 | seq_printf(s, "rp.%d: ", rp->sequence_id); | ||
1979 | list_for_each_entry(res, &rp->res_list, list) | ||
1980 | seq_printf(s, "0x%x/0x%llx/%pK/%d ", | ||
1981 | res->type, res->tag, res->val, | ||
1982 | atomic_read(&res->refcount)); | ||
1983 | seq_puts(s, "\n"); | ||
1984 | } | ||
1985 | mutex_unlock(&dpu_crtc->rp_lock); | ||
1986 | |||
1987 | return 0; | 1469 | return 0; |
1988 | } | 1470 | } |
1989 | DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state); | 1471 | DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state); |
@@ -1999,19 +1481,12 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) | |||
1999 | .llseek = seq_lseek, | 1481 | .llseek = seq_lseek, |
2000 | .release = single_release, | 1482 | .release = single_release, |
2001 | }; | 1483 | }; |
2002 | static const struct file_operations debugfs_misr_fops = { | ||
2003 | .open = simple_open, | ||
2004 | .read = _dpu_crtc_misr_read, | ||
2005 | .write = _dpu_crtc_misr_setup, | ||
2006 | }; | ||
2007 | 1484 | ||
2008 | if (!crtc) | 1485 | if (!crtc) |
2009 | return -EINVAL; | 1486 | return -EINVAL; |
2010 | dpu_crtc = to_dpu_crtc(crtc); | 1487 | dpu_crtc = to_dpu_crtc(crtc); |
2011 | 1488 | ||
2012 | dpu_kms = _dpu_crtc_get_kms(crtc); | 1489 | dpu_kms = _dpu_crtc_get_kms(crtc); |
2013 | if (!dpu_kms) | ||
2014 | return -EINVAL; | ||
2015 | 1490 | ||
2016 | dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, | 1491 | dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, |
2017 | crtc->dev->primary->debugfs_root); | 1492 | crtc->dev->primary->debugfs_root); |
@@ -2026,8 +1501,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) | |||
2026 | dpu_crtc->debugfs_root, | 1501 | dpu_crtc->debugfs_root, |
2027 | &dpu_crtc->base, | 1502 | &dpu_crtc->base, |
2028 | &dpu_crtc_debugfs_state_fops); | 1503 | &dpu_crtc_debugfs_state_fops); |
2029 | debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root, | ||
2030 | dpu_crtc, &debugfs_misr_fops); | ||
2031 | 1504 | ||
2032 | return 0; | 1505 | return 0; |
2033 | } | 1506 | } |
@@ -2082,7 +1555,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { | |||
2082 | }; | 1555 | }; |
2083 | 1556 | ||
2084 | /* initialize crtc */ | 1557 | /* initialize crtc */ |
2085 | struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane) | 1558 | struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, |
1559 | struct drm_plane *cursor) | ||
2086 | { | 1560 | { |
2087 | struct drm_crtc *crtc = NULL; | 1561 | struct drm_crtc *crtc = NULL; |
2088 | struct dpu_crtc *dpu_crtc = NULL; | 1562 | struct dpu_crtc *dpu_crtc = NULL; |
@@ -2104,9 +1578,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane) | |||
2104 | spin_lock_init(&dpu_crtc->spin_lock); | 1578 | spin_lock_init(&dpu_crtc->spin_lock); |
2105 | atomic_set(&dpu_crtc->frame_pending, 0); | 1579 | atomic_set(&dpu_crtc->frame_pending, 0); |
2106 | 1580 | ||
2107 | mutex_init(&dpu_crtc->rp_lock); | ||
2108 | INIT_LIST_HEAD(&dpu_crtc->rp_head); | ||
2109 | |||
2110 | init_completion(&dpu_crtc->frame_done_comp); | 1581 | init_completion(&dpu_crtc->frame_done_comp); |
2111 | 1582 | ||
2112 | INIT_LIST_HEAD(&dpu_crtc->frame_event_list); | 1583 | INIT_LIST_HEAD(&dpu_crtc->frame_event_list); |
@@ -2119,7 +1590,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane) | |||
2119 | dpu_crtc_frame_event_work); | 1590 | dpu_crtc_frame_event_work); |
2120 | } | 1591 | } |
2121 | 1592 | ||
2122 | drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs, | 1593 | drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, |
2123 | NULL); | 1594 | NULL); |
2124 | 1595 | ||
2125 | drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); | 1596 | drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); |
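dpu_crtc_init() now accepts a cursor plane and forwards it to drm_crtc_init_with_planes(), which is how a cursor plane gets attached to the DPU CRTC. A hypothetical call site, assuming the caller has already created the two planes (the cursor may still be NULL when none is exposed):

#include <linux/err.h>
#include <drm/drm_crtc.h>
#include "dpu_crtc.h"

static int example_create_crtc(struct drm_device *dev,
			       struct drm_plane *primary,
			       struct drm_plane *cursor)
{
	struct drm_crtc *crtc;

	crtc = dpu_crtc_init(dev, primary, cursor);
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	return 0;
}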
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index e87109e608e9..3723b4830335 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | |||
@@ -83,14 +83,14 @@ struct dpu_crtc_smmu_state_data { | |||
83 | /** | 83 | /** |
84 | * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC | 84 | * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC |
85 | * @hw_lm: LM HW Driver context | 85 | * @hw_lm: LM HW Driver context |
86 | * @hw_ctl: CTL Path HW driver context | 86 | * @lm_ctl: CTL Path HW driver context |
87 | * @encoder: Encoder attached to this lm & ctl | 87 | * @encoder: Encoder attached to this lm & ctl |
88 | * @mixer_op_mode: mixer blending operation mode | 88 | * @mixer_op_mode: mixer blending operation mode |
89 | * @flush_mask: mixer flush mask for ctl, mixer and pipe | 89 | * @flush_mask: mixer flush mask for ctl, mixer and pipe |
90 | */ | 90 | */ |
91 | struct dpu_crtc_mixer { | 91 | struct dpu_crtc_mixer { |
92 | struct dpu_hw_mixer *hw_lm; | 92 | struct dpu_hw_mixer *hw_lm; |
93 | struct dpu_hw_ctl *hw_ctl; | 93 | struct dpu_hw_ctl *lm_ctl; |
94 | struct drm_encoder *encoder; | 94 | struct drm_encoder *encoder; |
95 | u32 mixer_op_mode; | 95 | u32 mixer_op_mode; |
96 | u32 flush_mask; | 96 | u32 flush_mask; |
@@ -121,11 +121,6 @@ struct dpu_crtc_frame_event { | |||
121 | * struct dpu_crtc - virtualized CRTC data structure | 121 | * struct dpu_crtc - virtualized CRTC data structure |
122 | * @base : Base drm crtc structure | 122 | * @base : Base drm crtc structure |
123 | * @name : ASCII description of this crtc | 123 | * @name : ASCII description of this crtc |
124 | * @num_ctls : Number of ctl paths in use | ||
125 | * @num_mixers : Number of mixers in use | ||
126 | * @mixers_swapped: Whether the mixers have been swapped for left/right update | ||
127 | * especially in the case of DSC Merge. | ||
128 | * @mixers : List of active mixers | ||
129 | * @event : Pointer to last received drm vblank event. If there is a | 124 | * @event : Pointer to last received drm vblank event. If there is a |
130 | * pending vblank event, this will be non-null. | 125 | * pending vblank event, this will be non-null. |
131 | * @vsync_count : Running count of received vsync events | 126 | * @vsync_count : Running count of received vsync events |
@@ -156,27 +151,14 @@ struct dpu_crtc_frame_event { | |||
156 | * @event_thread : Pointer to event handler thread | 151 | * @event_thread : Pointer to event handler thread |
157 | * @event_worker : Event worker queue | 152 | * @event_worker : Event worker queue |
158 | * @event_lock : Spinlock around event handling code | 153 | * @event_lock : Spinlock around event handling code |
159 | * @misr_enable : boolean entry indicates misr enable/disable status. | ||
160 | * @misr_frame_count : misr frame count provided by client | ||
161 | * @misr_data : store misr data before turning off the clocks. | ||
162 | * @phandle: Pointer to power handler | 154 | * @phandle: Pointer to power handler |
163 | * @power_event : registered power event handle | 155 | * @power_event : registered power event handle |
164 | * @cur_perf : current performance committed to clock/bandwidth driver | 156 | * @cur_perf : current performance committed to clock/bandwidth driver |
165 | * @rp_lock : serialization lock for resource pool | ||
166 | * @rp_head : list of active resource pool | ||
167 | * @scl3_cfg_lut : qseed3 lut config | ||
168 | */ | 157 | */ |
169 | struct dpu_crtc { | 158 | struct dpu_crtc { |
170 | struct drm_crtc base; | 159 | struct drm_crtc base; |
171 | char name[DPU_CRTC_NAME_SIZE]; | 160 | char name[DPU_CRTC_NAME_SIZE]; |
172 | 161 | ||
173 | /* HW Resources reserved for the crtc */ | ||
174 | u32 num_ctls; | ||
175 | u32 num_mixers; | ||
176 | bool mixers_swapped; | ||
177 | struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS]; | ||
178 | struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg; | ||
179 | |||
180 | struct drm_pending_vblank_event *event; | 162 | struct drm_pending_vblank_event *event; |
181 | u32 vsync_count; | 163 | u32 vsync_count; |
182 | 164 | ||
@@ -206,77 +188,20 @@ struct dpu_crtc { | |||
206 | 188 | ||
207 | /* for handling internal event thread */ | 189 | /* for handling internal event thread */ |
208 | spinlock_t event_lock; | 190 | spinlock_t event_lock; |
209 | bool misr_enable; | ||
210 | u32 misr_frame_count; | ||
211 | u32 misr_data[CRTC_DUAL_MIXERS]; | ||
212 | 191 | ||
213 | struct dpu_power_handle *phandle; | 192 | struct dpu_power_handle *phandle; |
214 | struct dpu_power_event *power_event; | 193 | struct dpu_power_event *power_event; |
215 | 194 | ||
216 | struct dpu_core_perf_params cur_perf; | 195 | struct dpu_core_perf_params cur_perf; |
217 | 196 | ||
218 | struct mutex rp_lock; | ||
219 | struct list_head rp_head; | ||
220 | |||
221 | struct dpu_crtc_smmu_state_data smmu_state; | 197 | struct dpu_crtc_smmu_state_data smmu_state; |
222 | }; | 198 | }; |
223 | 199 | ||
224 | #define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base) | 200 | #define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base) |
225 | 201 | ||
226 | /** | 202 | /** |
227 | * struct dpu_crtc_res_ops - common operations for crtc resources | ||
228 | * @get: get given resource | ||
229 | * @put: put given resource | ||
230 | */ | ||
231 | struct dpu_crtc_res_ops { | ||
232 | void *(*get)(void *val, u32 type, u64 tag); | ||
233 | void (*put)(void *val); | ||
234 | }; | ||
235 | |||
236 | #define DPU_CRTC_RES_FLAG_FREE BIT(0) | ||
237 | |||
238 | /** | ||
239 | * struct dpu_crtc_res - definition of crtc resources | ||
240 | * @list: list of crtc resource | ||
241 | * @type: crtc resource type | ||
242 | * @tag: unique identifier per type | ||
243 | * @refcount: reference/usage count | ||
244 | * @ops: callback operations | ||
245 | * @val: resource handle associated with type/tag | ||
246 | * @flags: customization flags | ||
247 | */ | ||
248 | struct dpu_crtc_res { | ||
249 | struct list_head list; | ||
250 | u32 type; | ||
251 | u64 tag; | ||
252 | atomic_t refcount; | ||
253 | struct dpu_crtc_res_ops ops; | ||
254 | void *val; | ||
255 | u32 flags; | ||
256 | }; | ||
257 | |||
258 | /** | ||
259 | * dpu_crtc_respool - crtc resource pool | ||
260 | * @rp_lock: pointer to serialization lock | ||
261 | * @rp_head: pointer to head of active resource pools of this crtc | ||
262 | * @rp_list: list of crtc resource pool | ||
263 | * @sequence_id: sequence identifier, incremented per state duplication | ||
264 | * @res_list: list of resource managed by this resource pool | ||
265 | * @ops: resource operations for parent resource pool | ||
266 | */ | ||
267 | struct dpu_crtc_respool { | ||
268 | struct mutex *rp_lock; | ||
269 | struct list_head *rp_head; | ||
270 | struct list_head rp_list; | ||
271 | u32 sequence_id; | ||
272 | struct list_head res_list; | ||
273 | struct dpu_crtc_res_ops ops; | ||
274 | }; | ||
275 | |||
276 | /** | ||
277 | * struct dpu_crtc_state - dpu container for atomic crtc state | 203 | * struct dpu_crtc_state - dpu container for atomic crtc state |
278 | * @base: Base drm crtc state structure | 204 | * @base: Base drm crtc state structure |
279 | * @is_ppsplit : Whether current topology requires PPSplit special handling | ||
280 | * @bw_control : true if bw/clk controlled by core bw/clk properties | 205 | * @bw_control : true if bw/clk controlled by core bw/clk properties |
281 | * @bw_split_vote : true if bw controlled by llcc/dram bw properties | 206 | * @bw_split_vote : true if bw controlled by llcc/dram bw properties |
282 | * @lm_bounds : LM boundaries based on current mode full resolution, no ROI. | 207 | * @lm_bounds : LM boundaries based on current mode full resolution, no ROI. |
@@ -285,41 +210,41 @@ struct dpu_crtc_respool { | |||
285 | * @property_values: Current crtc property values | 210 | * @property_values: Current crtc property values |
286 | * @input_fence_timeout_ns : Cached input fence timeout, in ns | 211 | * @input_fence_timeout_ns : Cached input fence timeout, in ns |
287 | * @new_perf: new performance state being requested | 212 | * @new_perf: new performance state being requested |
213 | * @num_mixers : Number of mixers in use | ||
214 | * @mixers : List of active mixers | ||
215 | * @num_ctls : Number of ctl paths in use | ||
216 | * @hw_ctls : List of active ctl paths | ||
288 | */ | 217 | */ |
289 | struct dpu_crtc_state { | 218 | struct dpu_crtc_state { |
290 | struct drm_crtc_state base; | 219 | struct drm_crtc_state base; |
291 | 220 | ||
292 | bool bw_control; | 221 | bool bw_control; |
293 | bool bw_split_vote; | 222 | bool bw_split_vote; |
294 | |||
295 | bool is_ppsplit; | ||
296 | struct drm_rect lm_bounds[CRTC_DUAL_MIXERS]; | 223 | struct drm_rect lm_bounds[CRTC_DUAL_MIXERS]; |
297 | 224 | ||
298 | uint64_t input_fence_timeout_ns; | 225 | uint64_t input_fence_timeout_ns; |
299 | 226 | ||
300 | struct dpu_core_perf_params new_perf; | 227 | struct dpu_core_perf_params new_perf; |
301 | struct dpu_crtc_respool rp; | 228 | |
229 | /* HW Resources reserved for the crtc */ | ||
230 | u32 num_mixers; | ||
231 | struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS]; | ||
232 | |||
233 | u32 num_ctls; | ||
234 | struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS]; | ||
302 | }; | 235 | }; |
303 | 236 | ||
304 | #define to_dpu_crtc_state(x) \ | 237 | #define to_dpu_crtc_state(x) \ |
305 | container_of(x, struct dpu_crtc_state, base) | 238 | container_of(x, struct dpu_crtc_state, base) |
306 | 239 | ||
307 | /** | 240 | /** |
308 | * dpu_crtc_get_mixer_width - get the mixer width | 241 | * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers? |
309 | * Mixer width will be same as panel width(/2 for split) | 242 | * @cstate: Pointer to dpu crtc state |
243 | * @Return: true - has two mixers, false - has one mixer | ||
310 | */ | 244 | */ |
311 | static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc, | 245 | static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate) |
312 | struct dpu_crtc_state *cstate, struct drm_display_mode *mode) | ||
313 | { | 246 | { |
314 | u32 mixer_width; | 247 | return cstate->num_mixers == CRTC_DUAL_MIXERS; |
315 | |||
316 | if (!dpu_crtc || !cstate || !mode) | ||
317 | return 0; | ||
318 | |||
319 | mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ? | ||
320 | mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay); | ||
321 | |||
322 | return mixer_width; | ||
323 | } | 248 | } |
324 | 249 | ||
325 | /** | 250 | /** |
@@ -375,9 +300,11 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc, | |||
375 | * dpu_crtc_init - create a new crtc object | 300 | * dpu_crtc_init - create a new crtc object |
376 | * @dev: dpu device | 301 | * @dev: dpu device |
377 | * @plane: base plane | 302 | * @plane: base plane |
303 | * @cursor: cursor plane | ||
378 | * @Return: new crtc object or error | 304 | * @Return: new crtc object or error |
379 | */ | 305 | */ |
380 | struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane); | 306 | struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, |
307 | struct drm_plane *cursor); | ||
381 | 308 | ||
382 | /** | 309 | /** |
383 | * dpu_crtc_register_custom_event - api for enabling/disabling crtc event | 310 | * dpu_crtc_register_custom_event - api for enabling/disabling crtc event |
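With the mixer and CTL bookkeeping moved into struct dpu_crtc_state, code running against a particular commit can size things from the atomic state alone, and dpu_crtc_state_is_stereo() above takes over the dual-mixer check the old dpu_crtc_get_mixer_width() helper carried. A sketch of what a width helper reduces to under the new layout (an illustration, not a copy of the file-local helper now in dpu_crtc.c):

static inline int example_mixer_width(struct dpu_crtc_state *cstate,
				      struct drm_display_mode *mode)
{
	/* split the mode across two LMs when the CRTC runs in stereo */
	return dpu_crtc_state_is_stereo(cstate) ?
			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay;
}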
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 1b4de3486ef9..96cdf06e7da2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | |||
@@ -65,8 +65,6 @@ | |||
65 | 65 | ||
66 | #define MAX_CHANNELS_PER_ENC 2 | 66 | #define MAX_CHANNELS_PER_ENC 2 |
67 | 67 | ||
68 | #define MISR_BUFF_SIZE 256 | ||
69 | |||
70 | #define IDLE_SHORT_TIMEOUT 1 | 68 | #define IDLE_SHORT_TIMEOUT 1 |
71 | 69 | ||
72 | #define MAX_VDISPLAY_SPLIT 1080 | 70 | #define MAX_VDISPLAY_SPLIT 1080 |
@@ -161,8 +159,6 @@ enum dpu_enc_rc_states { | |||
161 | * @frame_done_timer: watchdog timer for frame done event | 159 | * @frame_done_timer: watchdog timer for frame done event |
162 | * @vsync_event_timer: vsync timer | 160 | * @vsync_event_timer: vsync timer |
163 | * @disp_info: local copy of msm_display_info struct | 161 | * @disp_info: local copy of msm_display_info struct |
164 | * @misr_enable: misr enable/disable status | ||
165 | * @misr_frame_count: misr frame count before start capturing the data | ||
166 | * @idle_pc_supported: indicate if idle power collapse is supported | 162 | * @idle_pc_supported: indicate if idle power collapse is supported |
167 | * @rc_lock: resource control mutex lock to protect | 163 | * @rc_lock: resource control mutex lock to protect |
168 | * virt encoder over various state changes | 164 | * virt encoder over various state changes |
@@ -179,11 +175,10 @@ struct dpu_encoder_virt { | |||
179 | spinlock_t enc_spinlock; | 175 | spinlock_t enc_spinlock; |
180 | uint32_t bus_scaling_client; | 176 | uint32_t bus_scaling_client; |
181 | 177 | ||
182 | uint32_t display_num_of_h_tiles; | ||
183 | |||
184 | unsigned int num_phys_encs; | 178 | unsigned int num_phys_encs; |
185 | struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; | 179 | struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; |
186 | struct dpu_encoder_phys *cur_master; | 180 | struct dpu_encoder_phys *cur_master; |
181 | struct dpu_encoder_phys *cur_slave; | ||
187 | struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; | 182 | struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; |
188 | 183 | ||
189 | bool intfs_swapped; | 184 | bool intfs_swapped; |
@@ -202,8 +197,6 @@ struct dpu_encoder_virt { | |||
202 | struct timer_list vsync_event_timer; | 197 | struct timer_list vsync_event_timer; |
203 | 198 | ||
204 | struct msm_display_info disp_info; | 199 | struct msm_display_info disp_info; |
205 | bool misr_enable; | ||
206 | u32 misr_frame_count; | ||
207 | 200 | ||
208 | bool idle_pc_supported; | 201 | bool idle_pc_supported; |
209 | struct mutex rc_lock; | 202 | struct mutex rc_lock; |
@@ -443,30 +436,22 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, | |||
443 | } | 436 | } |
444 | 437 | ||
445 | void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc, | 438 | void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc, |
446 | struct dpu_encoder_hw_resources *hw_res, | 439 | struct dpu_encoder_hw_resources *hw_res) |
447 | struct drm_connector_state *conn_state) | ||
448 | { | 440 | { |
449 | struct dpu_encoder_virt *dpu_enc = NULL; | 441 | struct dpu_encoder_virt *dpu_enc = NULL; |
450 | int i = 0; | 442 | int i = 0; |
451 | 443 | ||
452 | if (!hw_res || !drm_enc || !conn_state) { | ||
453 | DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n", | ||
454 | drm_enc != 0, hw_res != 0, conn_state != 0); | ||
455 | return; | ||
456 | } | ||
457 | |||
458 | dpu_enc = to_dpu_encoder_virt(drm_enc); | 444 | dpu_enc = to_dpu_encoder_virt(drm_enc); |
459 | DPU_DEBUG_ENC(dpu_enc, "\n"); | 445 | DPU_DEBUG_ENC(dpu_enc, "\n"); |
460 | 446 | ||
461 | /* Query resources used by phys encs, expected to be without overlap */ | 447 | /* Query resources used by phys encs, expected to be without overlap */ |
462 | memset(hw_res, 0, sizeof(*hw_res)); | 448 | memset(hw_res, 0, sizeof(*hw_res)); |
463 | hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles; | ||
464 | 449 | ||
465 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | 450 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { |
466 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | 451 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; |
467 | 452 | ||
468 | if (phys && phys->ops.get_hw_resources) | 453 | if (phys && phys->ops.get_hw_resources) |
469 | phys->ops.get_hw_resources(phys, hw_res, conn_state); | 454 | phys->ops.get_hw_resources(phys, hw_res); |
470 | } | 455 | } |
471 | } | 456 | } |
472 | 457 | ||
@@ -525,7 +510,7 @@ void dpu_encoder_helper_split_config( | |||
525 | hw_mdptop = phys_enc->hw_mdptop; | 510 | hw_mdptop = phys_enc->hw_mdptop; |
526 | disp_info = &dpu_enc->disp_info; | 511 | disp_info = &dpu_enc->disp_info; |
527 | 512 | ||
528 | if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI) | 513 | if (disp_info->intf_type != DRM_MODE_ENCODER_DSI) |
529 | return; | 514 | return; |
530 | 515 | ||
531 | /** | 516 | /** |
@@ -660,7 +645,7 @@ static int dpu_encoder_virt_atomic_check( | |||
660 | if (drm_atomic_crtc_needs_modeset(crtc_state) | 645 | if (drm_atomic_crtc_needs_modeset(crtc_state) |
661 | && dpu_enc->mode_set_complete) { | 646 | && dpu_enc->mode_set_complete) { |
662 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, | 647 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, |
663 | conn_state, topology, true); | 648 | topology, true); |
664 | dpu_enc->mode_set_complete = false; | 649 | dpu_enc->mode_set_complete = false; |
665 | } | 650 | } |
666 | } | 651 | } |
@@ -1016,9 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, | |||
1016 | struct dpu_kms *dpu_kms; | 1001 | struct dpu_kms *dpu_kms; |
1017 | struct list_head *connector_list; | 1002 | struct list_head *connector_list; |
1018 | struct drm_connector *conn = NULL, *conn_iter; | 1003 | struct drm_connector *conn = NULL, *conn_iter; |
1019 | struct dpu_rm_hw_iter pp_iter; | 1004 | struct dpu_rm_hw_iter pp_iter, ctl_iter; |
1020 | struct msm_display_topology topology; | 1005 | struct msm_display_topology topology; |
1021 | enum dpu_rm_topology_name topology_name; | 1006 | struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; |
1022 | int i = 0, ret; | 1007 | int i = 0, ret; |
1023 | 1008 | ||
1024 | if (!drm_enc) { | 1009 | if (!drm_enc) { |
@@ -1051,7 +1036,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, | |||
1051 | 1036 | ||
1052 | /* Reserve dynamic resources now. Indicating non-AtomicTest phase */ | 1037 | /* Reserve dynamic resources now. Indicating non-AtomicTest phase */ |
1053 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state, | 1038 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state, |
1054 | conn->state, topology, false); | 1039 | topology, false); |
1055 | if (ret) { | 1040 | if (ret) { |
1056 | DPU_ERROR_ENC(dpu_enc, | 1041 | DPU_ERROR_ENC(dpu_enc, |
1057 | "failed to reserve hw resources, %d\n", ret); | 1042 | "failed to reserve hw resources, %d\n", ret); |
@@ -1066,19 +1051,33 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, | |||
1066 | dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw; | 1051 | dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw; |
1067 | } | 1052 | } |
1068 | 1053 | ||
1069 | topology_name = dpu_rm_get_topology_name(topology); | 1054 | dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL); |
1055 | for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { | ||
1056 | if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter)) | ||
1057 | break; | ||
1058 | hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw; | ||
1059 | } | ||
1060 | |||
1070 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | 1061 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { |
1071 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | 1062 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; |
1072 | 1063 | ||
1073 | if (phys) { | 1064 | if (phys) { |
1074 | if (!dpu_enc->hw_pp[i]) { | 1065 | if (!dpu_enc->hw_pp[i]) { |
1075 | DPU_ERROR_ENC(dpu_enc, | 1066 | DPU_ERROR_ENC(dpu_enc, "no pp block assigned " |
1076 | "invalid pingpong block for the encoder\n"); | 1067 | "at idx: %d\n", i); |
1077 | return; | 1068 | return; |
1078 | } | 1069 | } |
1070 | |||
1071 | if (!hw_ctl[i]) { | ||
1072 | DPU_ERROR_ENC(dpu_enc, "no ctl block assigned " |
1073 | "at idx: %d\n", i); | ||
1074 | return; | ||
1075 | } | ||
1076 | |||
1079 | phys->hw_pp = dpu_enc->hw_pp[i]; | 1077 | phys->hw_pp = dpu_enc->hw_pp[i]; |
1078 | phys->hw_ctl = hw_ctl[i]; | ||
1079 | |||
1080 | phys->connector = conn->state->connector; | 1080 | phys->connector = conn->state->connector; |
1081 | phys->topology_name = topology_name; | ||
1082 | if (phys->ops.mode_set) | 1081 | if (phys->ops.mode_set) |
1083 | phys->ops.mode_set(phys, mode, adj_mode); | 1082 | phys->ops.mode_set(phys, mode, adj_mode); |
1084 | } | 1083 | } |
@@ -1111,12 +1110,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) | |||
1111 | return; | 1110 | return; |
1112 | } | 1111 | } |
1113 | 1112 | ||
1114 | if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && | ||
1115 | dpu_enc->cur_master->hw_mdptop && | ||
1116 | dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) | ||
1117 | dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( | ||
1118 | dpu_enc->cur_master->hw_mdptop); | ||
1119 | |||
1120 | if (dpu_enc->cur_master->hw_mdptop && | 1113 | if (dpu_enc->cur_master->hw_mdptop && |
1121 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc) | 1114 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc) |
1122 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc( | 1115 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc( |
@@ -1153,7 +1146,7 @@ void dpu_encoder_virt_restore(struct drm_encoder *drm_enc) | |||
1153 | static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) | 1146 | static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) |
1154 | { | 1147 | { |
1155 | struct dpu_encoder_virt *dpu_enc = NULL; | 1148 | struct dpu_encoder_virt *dpu_enc = NULL; |
1156 | int i, ret = 0; | 1149 | int ret = 0; |
1157 | struct drm_display_mode *cur_mode = NULL; | 1150 | struct drm_display_mode *cur_mode = NULL; |
1158 | 1151 | ||
1159 | if (!drm_enc) { | 1152 | if (!drm_enc) { |
@@ -1166,21 +1159,12 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) | |||
1166 | trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, | 1159 | trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, |
1167 | cur_mode->vdisplay); | 1160 | cur_mode->vdisplay); |
1168 | 1161 | ||
1169 | dpu_enc->cur_master = NULL; | 1162 | /* always enable slave encoder before master */ |
1170 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | 1163 | if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) |
1171 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | 1164 | dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); |
1172 | 1165 | ||
1173 | if (phys && phys->ops.is_master && phys->ops.is_master(phys)) { | 1166 | if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) |
1174 | DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i); | 1167 | dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); |
1175 | dpu_enc->cur_master = phys; | ||
1176 | break; | ||
1177 | } | ||
1178 | } | ||
1179 | |||
1180 | if (!dpu_enc->cur_master) { | ||
1181 | DPU_ERROR("virt encoder has no master! num_phys %d\n", i); | ||
1182 | return; | ||
1183 | } | ||
1184 | 1168 | ||
1185 | ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); | 1169 | ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); |
1186 | if (ret) { | 1170 | if (ret) { |
@@ -1189,26 +1173,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) | |||
1189 | return; | 1173 | return; |
1190 | } | 1174 | } |
1191 | 1175 | ||
1192 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | ||
1193 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | ||
1194 | |||
1195 | if (!phys) | ||
1196 | continue; | ||
1197 | |||
1198 | if (phys != dpu_enc->cur_master) { | ||
1199 | if (phys->ops.enable) | ||
1200 | phys->ops.enable(phys); | ||
1201 | } | ||
1202 | |||
1203 | if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities & | ||
1204 | MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr) | ||
1205 | phys->ops.setup_misr(phys, true, | ||
1206 | dpu_enc->misr_frame_count); | ||
1207 | } | ||
1208 | |||
1209 | if (dpu_enc->cur_master->ops.enable) | ||
1210 | dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); | ||
1211 | |||
1212 | _dpu_encoder_virt_enable_helper(drm_enc); | 1176 | _dpu_encoder_virt_enable_helper(drm_enc); |
1213 | } | 1177 | } |
1214 | 1178 | ||
@@ -1266,8 +1230,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) | |||
1266 | dpu_enc->phys_encs[i]->connector = NULL; | 1230 | dpu_enc->phys_encs[i]->connector = NULL; |
1267 | } | 1231 | } |
1268 | 1232 | ||
1269 | dpu_enc->cur_master = NULL; | ||
1270 | |||
1271 | DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); | 1233 | DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); |
1272 | 1234 | ||
1273 | dpu_rm_release(&dpu_kms->rm, drm_enc); | 1235 | dpu_rm_release(&dpu_kms->rm, drm_enc); |
@@ -1397,9 +1359,9 @@ static void dpu_encoder_frame_done_callback( | |||
1397 | /* One of the physical encoders has become idle */ | 1359 | /* One of the physical encoders has become idle */ |
1398 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | 1360 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { |
1399 | if (dpu_enc->phys_encs[i] == ready_phys) { | 1361 | if (dpu_enc->phys_encs[i] == ready_phys) { |
1400 | clear_bit(i, dpu_enc->frame_busy_mask); | ||
1401 | trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, | 1362 | trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, |
1402 | dpu_enc->frame_busy_mask[0]); | 1363 | dpu_enc->frame_busy_mask[0]); |
1364 | clear_bit(i, dpu_enc->frame_busy_mask); | ||
1403 | } | 1365 | } |
1404 | } | 1366 | } |
1405 | 1367 | ||
@@ -1480,7 +1442,8 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, | |||
1480 | ret = ctl->ops.get_pending_flush(ctl); | 1442 | ret = ctl->ops.get_pending_flush(ctl); |
1481 | 1443 | ||
1482 | trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx, | 1444 | trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx, |
1483 | pending_kickoff_cnt, ctl->idx, ret); | 1445 | pending_kickoff_cnt, ctl->idx, |
1446 | extra_flush_bits, ret); | ||
1484 | } | 1447 | } |
1485 | 1448 | ||
1486 | /** | 1449 | /** |
@@ -1879,7 +1842,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) | |||
1879 | phys->ops.handle_post_kickoff(phys); | 1842 | phys->ops.handle_post_kickoff(phys); |
1880 | } | 1843 | } |
1881 | 1844 | ||
1882 | if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI && | 1845 | if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && |
1883 | !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) { | 1846 | !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) { |
1884 | trace_dpu_enc_early_kickoff(DRMID(drm_enc), | 1847 | trace_dpu_enc_early_kickoff(DRMID(drm_enc), |
1885 | ktime_to_ms(wakeup_time)); | 1848 | ktime_to_ms(wakeup_time)); |
@@ -1955,113 +1918,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode, | |||
1955 | return single_open(file, _dpu_encoder_status_show, inode->i_private); | 1918 | return single_open(file, _dpu_encoder_status_show, inode->i_private); |
1956 | } | 1919 | } |
1957 | 1920 | ||
1958 | static ssize_t _dpu_encoder_misr_setup(struct file *file, | ||
1959 | const char __user *user_buf, size_t count, loff_t *ppos) | ||
1960 | { | ||
1961 | struct dpu_encoder_virt *dpu_enc; | ||
1962 | int i = 0, rc; | ||
1963 | char buf[MISR_BUFF_SIZE + 1]; | ||
1964 | size_t buff_copy; | ||
1965 | u32 frame_count, enable; | ||
1966 | |||
1967 | if (!file || !file->private_data) | ||
1968 | return -EINVAL; | ||
1969 | |||
1970 | dpu_enc = file->private_data; | ||
1971 | |||
1972 | buff_copy = min_t(size_t, count, MISR_BUFF_SIZE); | ||
1973 | if (copy_from_user(buf, user_buf, buff_copy)) | ||
1974 | return -EINVAL; | ||
1975 | |||
1976 | buf[buff_copy] = 0; /* end of string */ | ||
1977 | |||
1978 | if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) | ||
1979 | return -EINVAL; | ||
1980 | |||
1981 | rc = _dpu_encoder_power_enable(dpu_enc, true); | ||
1982 | if (rc) | ||
1983 | return rc; | ||
1984 | |||
1985 | mutex_lock(&dpu_enc->enc_lock); | ||
1986 | dpu_enc->misr_enable = enable; | ||
1987 | dpu_enc->misr_frame_count = frame_count; | ||
1988 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | ||
1989 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | ||
1990 | |||
1991 | if (!phys || !phys->ops.setup_misr) | ||
1992 | continue; | ||
1993 | |||
1994 | phys->ops.setup_misr(phys, enable, frame_count); | ||
1995 | } | ||
1996 | mutex_unlock(&dpu_enc->enc_lock); | ||
1997 | _dpu_encoder_power_enable(dpu_enc, false); | ||
1998 | |||
1999 | return count; | ||
2000 | } | ||
2001 | |||
2002 | static ssize_t _dpu_encoder_misr_read(struct file *file, | ||
2003 | char __user *user_buff, size_t count, loff_t *ppos) | ||
2004 | { | ||
2005 | struct dpu_encoder_virt *dpu_enc; | ||
2006 | int i = 0, len = 0; | ||
2007 | char buf[MISR_BUFF_SIZE + 1] = {'\0'}; | ||
2008 | int rc; | ||
2009 | |||
2010 | if (*ppos) | ||
2011 | return 0; | ||
2012 | |||
2013 | if (!file || !file->private_data) | ||
2014 | return -EINVAL; | ||
2015 | |||
2016 | dpu_enc = file->private_data; | ||
2017 | |||
2018 | rc = _dpu_encoder_power_enable(dpu_enc, true); | ||
2019 | if (rc) | ||
2020 | return rc; | ||
2021 | |||
2022 | mutex_lock(&dpu_enc->enc_lock); | ||
2023 | if (!dpu_enc->misr_enable) { | ||
2024 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, | ||
2025 | "disabled\n"); | ||
2026 | goto buff_check; | ||
2027 | } else if (dpu_enc->disp_info.capabilities & | ||
2028 | ~MSM_DISPLAY_CAP_VID_MODE) { | ||
2029 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, | ||
2030 | "unsupported\n"); | ||
2031 | goto buff_check; | ||
2032 | } | ||
2033 | |||
2034 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | ||
2035 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | ||
2036 | |||
2037 | if (!phys || !phys->ops.collect_misr) | ||
2038 | continue; | ||
2039 | |||
2040 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, | ||
2041 | "Intf idx:%d\n", phys->intf_idx - INTF_0); | ||
2042 | len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n", | ||
2043 | phys->ops.collect_misr(phys)); | ||
2044 | } | ||
2045 | |||
2046 | buff_check: | ||
2047 | if (count <= len) { | ||
2048 | len = 0; | ||
2049 | goto end; | ||
2050 | } | ||
2051 | |||
2052 | if (copy_to_user(user_buff, buf, len)) { | ||
2053 | len = -EFAULT; | ||
2054 | goto end; | ||
2055 | } | ||
2056 | |||
2057 | *ppos += len; /* increase offset */ | ||
2058 | |||
2059 | end: | ||
2060 | mutex_unlock(&dpu_enc->enc_lock); | ||
2061 | _dpu_encoder_power_enable(dpu_enc, false); | ||
2062 | return len; | ||
2063 | } | ||
2064 | |||
2065 | static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) | 1921 | static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) |
2066 | { | 1922 | { |
2067 | struct dpu_encoder_virt *dpu_enc; | 1923 | struct dpu_encoder_virt *dpu_enc; |
@@ -2076,12 +1932,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) | |||
2076 | .release = single_release, | 1932 | .release = single_release, |
2077 | }; | 1933 | }; |
2078 | 1934 | ||
2079 | static const struct file_operations debugfs_misr_fops = { | ||
2080 | .open = simple_open, | ||
2081 | .read = _dpu_encoder_misr_read, | ||
2082 | .write = _dpu_encoder_misr_setup, | ||
2083 | }; | ||
2084 | |||
2085 | char name[DPU_NAME_SIZE]; | 1935 | char name[DPU_NAME_SIZE]; |
2086 | 1936 | ||
2087 | if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { | 1937 | if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { |
@@ -2105,9 +1955,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) | |||
2105 | debugfs_create_file("status", 0600, | 1955 | debugfs_create_file("status", 0600, |
2106 | dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); | 1956 | dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); |
2107 | 1957 | ||
2108 | debugfs_create_file("misr_data", 0600, | ||
2109 | dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops); | ||
2110 | |||
2111 | for (i = 0; i < dpu_enc->num_phys_encs; i++) | 1958 | for (i = 0; i < dpu_enc->num_phys_encs; i++) |
2112 | if (dpu_enc->phys_encs[i] && | 1959 | if (dpu_enc->phys_encs[i] && |
2113 | dpu_enc->phys_encs[i]->ops.late_register) | 1960 | dpu_enc->phys_encs[i]->ops.late_register) |
@@ -2195,6 +2042,11 @@ static int dpu_encoder_virt_add_phys_encs( | |||
2195 | ++dpu_enc->num_phys_encs; | 2042 | ++dpu_enc->num_phys_encs; |
2196 | } | 2043 | } |
2197 | 2044 | ||
2045 | if (params->split_role == ENC_ROLE_SLAVE) | ||
2046 | dpu_enc->cur_slave = enc; | ||
2047 | else | ||
2048 | dpu_enc->cur_master = enc; | ||
2049 | |||
2198 | return 0; | 2050 | return 0; |
2199 | } | 2051 | } |
2200 | 2052 | ||
@@ -2206,8 +2058,7 @@ static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = { | |||
2206 | 2058 | ||
2207 | static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, | 2059 | static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, |
2208 | struct dpu_kms *dpu_kms, | 2060 | struct dpu_kms *dpu_kms, |
2209 | struct msm_display_info *disp_info, | 2061 | struct msm_display_info *disp_info) |
2210 | int *drm_enc_mode) | ||
2211 | { | 2062 | { |
2212 | int ret = 0; | 2063 | int ret = 0; |
2213 | int i = 0; | 2064 | int i = 0; |
@@ -2220,6 +2071,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, | |||
2220 | return -EINVAL; | 2071 | return -EINVAL; |
2221 | } | 2072 | } |
2222 | 2073 | ||
2074 | dpu_enc->cur_master = NULL; | ||
2075 | |||
2223 | memset(&phys_params, 0, sizeof(phys_params)); | 2076 | memset(&phys_params, 0, sizeof(phys_params)); |
2224 | phys_params.dpu_kms = dpu_kms; | 2077 | phys_params.dpu_kms = dpu_kms; |
2225 | phys_params.parent = &dpu_enc->base; | 2078 | phys_params.parent = &dpu_enc->base; |
@@ -2228,24 +2081,17 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, | |||
2228 | 2081 | ||
2229 | DPU_DEBUG("\n"); | 2082 | DPU_DEBUG("\n"); |
2230 | 2083 | ||
2231 | if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) { | 2084 | switch (disp_info->intf_type) { |
2232 | *drm_enc_mode = DRM_MODE_ENCODER_DSI; | 2085 | case DRM_MODE_ENCODER_DSI: |
2233 | intf_type = INTF_DSI; | 2086 | intf_type = INTF_DSI; |
2234 | } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) { | 2087 | break; |
2235 | *drm_enc_mode = DRM_MODE_ENCODER_TMDS; | 2088 | default: |
2236 | intf_type = INTF_HDMI; | ||
2237 | } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) { | ||
2238 | *drm_enc_mode = DRM_MODE_ENCODER_TMDS; | ||
2239 | intf_type = INTF_DP; | ||
2240 | } else { | ||
2241 | DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n"); | 2089 | DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n"); |
2242 | return -EINVAL; | 2090 | return -EINVAL; |
2243 | } | 2091 | } |
2244 | 2092 | ||
2245 | WARN_ON(disp_info->num_of_h_tiles < 1); | 2093 | WARN_ON(disp_info->num_of_h_tiles < 1); |
2246 | 2094 | ||
2247 | dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles; | ||
2248 | |||
2249 | DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); | 2095 | DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); |
2250 | 2096 | ||
2251 | if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) || | 2097 | if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) || |
@@ -2358,25 +2204,22 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, | |||
2358 | struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); | 2204 | struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); |
2359 | struct drm_encoder *drm_enc = NULL; | 2205 | struct drm_encoder *drm_enc = NULL; |
2360 | struct dpu_encoder_virt *dpu_enc = NULL; | 2206 | struct dpu_encoder_virt *dpu_enc = NULL; |
2361 | int drm_enc_mode = DRM_MODE_ENCODER_NONE; | ||
2362 | int ret = 0; | 2207 | int ret = 0; |
2363 | 2208 | ||
2364 | dpu_enc = to_dpu_encoder_virt(enc); | 2209 | dpu_enc = to_dpu_encoder_virt(enc); |
2365 | 2210 | ||
2366 | mutex_init(&dpu_enc->enc_lock); | 2211 | mutex_init(&dpu_enc->enc_lock); |
2367 | ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info, | 2212 | ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); |
2368 | &drm_enc_mode); | ||
2369 | if (ret) | 2213 | if (ret) |
2370 | goto fail; | 2214 | goto fail; |
2371 | 2215 | ||
2372 | dpu_enc->cur_master = NULL; | ||
2373 | spin_lock_init(&dpu_enc->enc_spinlock); | 2216 | spin_lock_init(&dpu_enc->enc_spinlock); |
2374 | 2217 | ||
2375 | atomic_set(&dpu_enc->frame_done_timeout, 0); | 2218 | atomic_set(&dpu_enc->frame_done_timeout, 0); |
2376 | timer_setup(&dpu_enc->frame_done_timer, | 2219 | timer_setup(&dpu_enc->frame_done_timer, |
2377 | dpu_encoder_frame_done_timeout, 0); | 2220 | dpu_encoder_frame_done_timeout, 0); |
2378 | 2221 | ||
2379 | if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) | 2222 | if (disp_info->intf_type == DRM_MODE_ENCODER_DSI) |
2380 | timer_setup(&dpu_enc->vsync_event_timer, | 2223 | timer_setup(&dpu_enc->vsync_event_timer, |
2381 | dpu_encoder_vsync_event_handler, | 2224 | dpu_encoder_vsync_event_handler, |
2382 | 0); | 2225 | 0); |
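The enable path above is easier to follow with the flow pulled out of the diff. The following is a minimal, standalone C sketch (not the kernel code): it assumes simplified stand-in types for dpu_encoder_virt and dpu_encoder_phys, and only models how cur_master/cur_slave are cached when the physical encoders are created, so dpu_encoder_virt_enable() can simply enable the slave before the master without re-scanning the phys array.

/*
 * Illustrative sketch only; struct names and fields are simplified
 * stand-ins for the real dpu_encoder structures.
 */
#include <stdio.h>

struct phys_enc {
	const char *name;
	void (*enable)(struct phys_enc *phys);
};

struct virt_enc {
	struct phys_enc *cur_master;	/* cached when a MASTER/SOLO phys is added */
	struct phys_enc *cur_slave;	/* cached when a SLAVE phys is added */
};

static void phys_enable(struct phys_enc *phys)
{
	printf("enable %s\n", phys->name);
}

static void virt_enable(struct virt_enc *enc)
{
	/* always enable the slave encoder before the master */
	if (enc->cur_slave && enc->cur_slave->enable)
		enc->cur_slave->enable(enc->cur_slave);
	if (enc->cur_master && enc->cur_master->enable)
		enc->cur_master->enable(enc->cur_master);
}

int main(void)
{
	struct phys_enc master = { "master(intf_1)", phys_enable };
	struct phys_enc slave  = { "slave(intf_2)",  phys_enable };
	struct virt_enc enc    = { &master, &slave };

	virt_enable(&enc);	/* prints the slave first, then the master */
	return 0;
}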
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 60f809fc7c13..9dbf38f446d9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | |||
@@ -32,15 +32,9 @@ | |||
32 | /** | 32 | /** |
33 | * Encoder functions and data types | 33 | * Encoder functions and data types |
34 | * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused | 34 | * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused |
35 | * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs | ||
36 | * @display_num_of_h_tiles: Number of horizontal tiles in case of split | ||
37 | * interface | ||
38 | * @topology: Topology of the display | ||
39 | */ | 35 | */ |
40 | struct dpu_encoder_hw_resources { | 36 | struct dpu_encoder_hw_resources { |
41 | enum dpu_intf_mode intfs[INTF_MAX]; | 37 | enum dpu_intf_mode intfs[INTF_MAX]; |
42 | bool needs_cdm; | ||
43 | u32 display_num_of_h_tiles; | ||
44 | }; | 38 | }; |
45 | 39 | ||
46 | /** | 40 | /** |
@@ -56,11 +50,9 @@ struct dpu_encoder_kickoff_params { | |||
56 | * dpu_encoder_get_hw_resources - Populate table of required hardware resources | 50 | * dpu_encoder_get_hw_resources - Populate table of required hardware resources |
57 | * @encoder: encoder pointer | 51 | * @encoder: encoder pointer |
58 | * @hw_res: resource table to populate with encoder required resources | 52 | * @hw_res: resource table to populate with encoder required resources |
59 | * @conn_state: report hw reqs based on this proposed connector state | ||
60 | */ | 53 | */ |
61 | void dpu_encoder_get_hw_resources(struct drm_encoder *encoder, | 54 | void dpu_encoder_get_hw_resources(struct drm_encoder *encoder, |
62 | struct dpu_encoder_hw_resources *hw_res, | 55 | struct dpu_encoder_hw_resources *hw_res); |
63 | struct drm_connector_state *conn_state); | ||
64 | 56 | ||
65 | /** | 57 | /** |
66 | * dpu_encoder_register_vblank_callback - provide callback to encoder that | 58 | * dpu_encoder_register_vblank_callback - provide callback to encoder that |
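With needs_cdm, display_num_of_h_tiles and the connector-state argument gone, the resource table boils down to "which interface does each phys drive, and in which mode". A small standalone sketch, using simplified stand-in types and 0-based interface indices rather than the driver's INTF_0 offsets, shows the slimmed-down contract:

/*
 * Sketch of the reduced hw_resources fill; enum values and struct
 * layout are illustrative, not the driver's definitions.
 */
#include <stdio.h>
#include <string.h>

#define INTF_MAX 4

enum intf_mode { INTF_MODE_NONE, INTF_MODE_CMD, INTF_MODE_VIDEO };

struct hw_resources {
	enum intf_mode intfs[INTF_MAX];	/* indexed by interface number */
};

struct phys {
	int intf_idx;			/* 0-based in this sketch */
	enum intf_mode mode;
};

static void get_hw_resources(const struct phys *p, struct hw_resources *res)
{
	/* mirrors the now one-line cmd/vid get_hw_resources callbacks */
	res->intfs[p->intf_idx] = p->mode;
}

int main(void)
{
	struct hw_resources res;
	struct phys cmd = { .intf_idx = 1, .mode = INTF_MODE_CMD };

	memset(&res, 0, sizeof(res));	/* the virtual encoder clears the table first */
	get_hw_resources(&cmd, &res);

	for (int i = 0; i < INTF_MAX; i++)
		printf("intf %d: %d\n", i, res.intfs[i]);
	return 0;
}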
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index c7df8aad6613..964efcc757a4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #include "dpu_hw_pingpong.h" | 22 | #include "dpu_hw_pingpong.h" |
23 | #include "dpu_hw_ctl.h" | 23 | #include "dpu_hw_ctl.h" |
24 | #include "dpu_hw_top.h" | 24 | #include "dpu_hw_top.h" |
25 | #include "dpu_hw_cdm.h" | ||
26 | #include "dpu_encoder.h" | 25 | #include "dpu_encoder.h" |
26 | #include "dpu_crtc.h" | ||
27 | 27 | ||
28 | #define DPU_ENCODER_NAME_MAX 16 | 28 | #define DPU_ENCODER_NAME_MAX 16 |
29 | 29 | ||
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops { | |||
114 | * @handle_post_kickoff: Do any necessary post-kickoff work | 114 | * @handle_post_kickoff: Do any necessary post-kickoff work |
115 | * @trigger_start: Process start event on physical encoder | 115 | * @trigger_start: Process start event on physical encoder |
116 | * @needs_single_flush: Whether encoder slaves need to be flushed | 116 | * @needs_single_flush: Whether encoder slaves need to be flushed |
117 | * @setup_misr: Sets up MISR, enable and disables based on sysfs | ||
118 | * @collect_misr: Collects MISR data on frame update | ||
119 | * @hw_reset: Issue HW recovery such as CTL reset and clear | 117 | * @hw_reset: Issue HW recovery such as CTL reset and clear |
120 | * DPU_ENC_ERR_NEEDS_HW_RESET state | 118 | * DPU_ENC_ERR_NEEDS_HW_RESET state |
121 | * @irq_control: Handler to enable/disable all the encoder IRQs | 119 | * @irq_control: Handler to enable/disable all the encoder IRQs |
@@ -143,8 +141,7 @@ struct dpu_encoder_phys_ops { | |||
143 | struct drm_connector_state *conn_state); | 141 | struct drm_connector_state *conn_state); |
144 | void (*destroy)(struct dpu_encoder_phys *encoder); | 142 | void (*destroy)(struct dpu_encoder_phys *encoder); |
145 | void (*get_hw_resources)(struct dpu_encoder_phys *encoder, | 143 | void (*get_hw_resources)(struct dpu_encoder_phys *encoder, |
146 | struct dpu_encoder_hw_resources *hw_res, | 144 | struct dpu_encoder_hw_resources *hw_res); |
147 | struct drm_connector_state *conn_state); | ||
148 | int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); | 145 | int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); |
149 | int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); | 146 | int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); |
150 | int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); | 147 | int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); |
@@ -154,10 +151,6 @@ struct dpu_encoder_phys_ops { | |||
154 | void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); | 151 | void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); |
155 | void (*trigger_start)(struct dpu_encoder_phys *phys_enc); | 152 | void (*trigger_start)(struct dpu_encoder_phys *phys_enc); |
156 | bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc); | 153 | bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc); |
157 | |||
158 | void (*setup_misr)(struct dpu_encoder_phys *phys_encs, | ||
159 | bool enable, u32 frame_count); | ||
160 | u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc); | ||
161 | void (*hw_reset)(struct dpu_encoder_phys *phys_enc); | 154 | void (*hw_reset)(struct dpu_encoder_phys *phys_enc); |
162 | void (*irq_control)(struct dpu_encoder_phys *phys, bool enable); | 155 | void (*irq_control)(struct dpu_encoder_phys *phys, bool enable); |
163 | void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc); | 156 | void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc); |
@@ -210,8 +203,6 @@ struct dpu_encoder_irq { | |||
210 | * @parent_ops: Callbacks exposed by the parent to the phys_enc | 203 | * @parent_ops: Callbacks exposed by the parent to the phys_enc |
211 | * @hw_mdptop: Hardware interface to the top registers | 204 | * @hw_mdptop: Hardware interface to the top registers |
212 | * @hw_ctl: Hardware interface to the ctl registers | 205 | * @hw_ctl: Hardware interface to the ctl registers |
213 | * @hw_cdm: Hardware interface to the cdm registers | ||
214 | * @cdm_cfg: Chroma-down hardware configuration | ||
215 | * @hw_pp: Hardware interface to the ping pong registers | 206 | * @hw_pp: Hardware interface to the ping pong registers |
216 | * @dpu_kms: Pointer to the dpu_kms top level | 207 | * @dpu_kms: Pointer to the dpu_kms top level |
217 | * @cached_mode: DRM mode cached at mode_set time, acted on in enable | 208 | * @cached_mode: DRM mode cached at mode_set time, acted on in enable |
@@ -219,7 +210,6 @@ struct dpu_encoder_irq { | |||
219 | * @split_role: Role to play in a split-panel configuration | 210 | * @split_role: Role to play in a split-panel configuration |
220 | * @intf_mode: Interface mode | 211 | * @intf_mode: Interface mode |
221 | * @intf_idx: Interface index on dpu hardware | 212 | * @intf_idx: Interface index on dpu hardware |
222 | * @topology_name: topology selected for the display | ||
223 | * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes | 213 | * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes |
224 | * @enable_state: Enable state tracking | 214 | * @enable_state: Enable state tracking |
225 | * @vblank_refcount: Reference count of vblank request | 215 | * @vblank_refcount: Reference count of vblank request |
@@ -241,15 +231,12 @@ struct dpu_encoder_phys { | |||
241 | const struct dpu_encoder_virt_ops *parent_ops; | 231 | const struct dpu_encoder_virt_ops *parent_ops; |
242 | struct dpu_hw_mdp *hw_mdptop; | 232 | struct dpu_hw_mdp *hw_mdptop; |
243 | struct dpu_hw_ctl *hw_ctl; | 233 | struct dpu_hw_ctl *hw_ctl; |
244 | struct dpu_hw_cdm *hw_cdm; | ||
245 | struct dpu_hw_cdm_cfg cdm_cfg; | ||
246 | struct dpu_hw_pingpong *hw_pp; | 234 | struct dpu_hw_pingpong *hw_pp; |
247 | struct dpu_kms *dpu_kms; | 235 | struct dpu_kms *dpu_kms; |
248 | struct drm_display_mode cached_mode; | 236 | struct drm_display_mode cached_mode; |
249 | enum dpu_enc_split_role split_role; | 237 | enum dpu_enc_split_role split_role; |
250 | enum dpu_intf_mode intf_mode; | 238 | enum dpu_intf_mode intf_mode; |
251 | enum dpu_intf intf_idx; | 239 | enum dpu_intf intf_idx; |
252 | enum dpu_rm_topology_name topology_name; | ||
253 | spinlock_t *enc_spinlock; | 240 | spinlock_t *enc_spinlock; |
254 | enum dpu_enc_enable_state enable_state; | 241 | enum dpu_enc_enable_state enable_state; |
255 | atomic_t vblank_refcount; | 242 | atomic_t vblank_refcount; |
@@ -367,11 +354,15 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc); | |||
367 | static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( | 354 | static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( |
368 | struct dpu_encoder_phys *phys_enc) | 355 | struct dpu_encoder_phys *phys_enc) |
369 | { | 356 | { |
357 | struct dpu_crtc_state *dpu_cstate; | ||
358 | |||
370 | if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING) | 359 | if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING) |
371 | return BLEND_3D_NONE; | 360 | return BLEND_3D_NONE; |
372 | 361 | ||
362 | dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state); | ||
363 | |||
373 | if (phys_enc->split_role == ENC_ROLE_SOLO && | 364 | if (phys_enc->split_role == ENC_ROLE_SOLO && |
374 | phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE) | 365 | dpu_crtc_state_is_stereo(dpu_cstate)) |
375 | return BLEND_3D_H_ROW_INT; | 366 | return BLEND_3D_H_ROW_INT; |
376 | 367 | ||
377 | return BLEND_3D_NONE; | 368 | return BLEND_3D_NONE; |
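The 3D-merge decision above no longer depends on a cached per-phys topology_name; it asks the CRTC state whether the composition is stereo. The following is a simplified, self-contained sketch of that decision, assuming a hypothetical crtc_state layout and a stand-in for dpu_crtc_state_is_stereo() (which in the real driver is derived from the reserved mixer/interface counts):

/*
 * Illustrative only: models "solo encoder + stereo CRTC state => 3D
 * horizontal row interleave", with simplified types.
 */
#include <stdbool.h>
#include <stdio.h>

enum blend_3d { BLEND_3D_NONE, BLEND_3D_H_ROW_INT };
enum split_role { ENC_ROLE_SOLO, ENC_ROLE_MASTER, ENC_ROLE_SLAVE };

struct crtc_state {
	int num_mixers;
	int num_intfs;
};

static bool crtc_state_is_stereo(const struct crtc_state *cs)
{
	/* two mixers feeding a single interface need a 3D merge */
	return cs->num_mixers == 2 && cs->num_intfs == 1;
}

static enum blend_3d get_3d_blend_mode(enum split_role role,
				       const struct crtc_state *cs)
{
	if (role == ENC_ROLE_SOLO && crtc_state_is_stereo(cs))
		return BLEND_3D_H_ROW_INT;
	return BLEND_3D_NONE;
}

int main(void)
{
	struct crtc_state dual = { .num_mixers = 2, .num_intfs = 1 };
	struct crtc_state solo = { .num_mixers = 1, .num_intfs = 1 };

	printf("dual: %d, solo: %d\n",
	       get_3d_blend_mode(ENC_ROLE_SOLO, &dual),
	       get_3d_blend_mode(ENC_ROLE_SOLO, &solo));
	return 0;
}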
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 3084675ed425..b2d7f0ded24c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | |||
@@ -196,9 +196,6 @@ static void dpu_encoder_phys_cmd_mode_set( | |||
196 | { | 196 | { |
197 | struct dpu_encoder_phys_cmd *cmd_enc = | 197 | struct dpu_encoder_phys_cmd *cmd_enc = |
198 | to_dpu_encoder_phys_cmd(phys_enc); | 198 | to_dpu_encoder_phys_cmd(phys_enc); |
199 | struct dpu_rm *rm = &phys_enc->dpu_kms->rm; | ||
200 | struct dpu_rm_hw_iter iter; | ||
201 | int i, instance; | ||
202 | 199 | ||
203 | if (!phys_enc || !mode || !adj_mode) { | 200 | if (!phys_enc || !mode || !adj_mode) { |
204 | DPU_ERROR("invalid args\n"); | 201 | DPU_ERROR("invalid args\n"); |
@@ -208,22 +205,6 @@ static void dpu_encoder_phys_cmd_mode_set( | |||
208 | DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n"); | 205 | DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n"); |
209 | drm_mode_debug_printmodeline(adj_mode); | 206 | drm_mode_debug_printmodeline(adj_mode); |
210 | 207 | ||
211 | instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; | ||
212 | |||
213 | /* Retrieve previously allocated HW Resources. Shouldn't fail */ | ||
214 | dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL); | ||
215 | for (i = 0; i <= instance; i++) { | ||
216 | if (dpu_rm_get_hw(rm, &iter)) | ||
217 | phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw; | ||
218 | } | ||
219 | |||
220 | if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { | ||
221 | DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n", | ||
222 | PTR_ERR(phys_enc->hw_ctl)); | ||
223 | phys_enc->hw_ctl = NULL; | ||
224 | return; | ||
225 | } | ||
226 | |||
227 | _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc); | 208 | _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc); |
228 | } | 209 | } |
229 | 210 | ||
@@ -618,23 +599,8 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc) | |||
618 | 599 | ||
619 | static void dpu_encoder_phys_cmd_get_hw_resources( | 600 | static void dpu_encoder_phys_cmd_get_hw_resources( |
620 | struct dpu_encoder_phys *phys_enc, | 601 | struct dpu_encoder_phys *phys_enc, |
621 | struct dpu_encoder_hw_resources *hw_res, | 602 | struct dpu_encoder_hw_resources *hw_res) |
622 | struct drm_connector_state *conn_state) | ||
623 | { | 603 | { |
624 | struct dpu_encoder_phys_cmd *cmd_enc = | ||
625 | to_dpu_encoder_phys_cmd(phys_enc); | ||
626 | |||
627 | if (!phys_enc) { | ||
628 | DPU_ERROR("invalid encoder\n"); | ||
629 | return; | ||
630 | } | ||
631 | |||
632 | if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) { | ||
633 | DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx); | ||
634 | return; | ||
635 | } | ||
636 | |||
637 | DPU_DEBUG_CMDENC(cmd_enc, "\n"); | ||
638 | hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD; | 604 | hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD; |
639 | } | 605 | } |
640 | 606 | ||
@@ -823,7 +789,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( | |||
823 | { | 789 | { |
824 | struct dpu_encoder_phys *phys_enc = NULL; | 790 | struct dpu_encoder_phys *phys_enc = NULL; |
825 | struct dpu_encoder_phys_cmd *cmd_enc = NULL; | 791 | struct dpu_encoder_phys_cmd *cmd_enc = NULL; |
826 | struct dpu_hw_mdp *hw_mdp; | ||
827 | struct dpu_encoder_irq *irq; | 792 | struct dpu_encoder_irq *irq; |
828 | int i, ret = 0; | 793 | int i, ret = 0; |
829 | 794 | ||
@@ -836,14 +801,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( | |||
836 | goto fail; | 801 | goto fail; |
837 | } | 802 | } |
838 | phys_enc = &cmd_enc->base; | 803 | phys_enc = &cmd_enc->base; |
839 | 804 | phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; | |
840 | hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm); | ||
841 | if (IS_ERR_OR_NULL(hw_mdp)) { | ||
842 | ret = PTR_ERR(hw_mdp); | ||
843 | DPU_ERROR("failed to get mdptop\n"); | ||
844 | goto fail_mdp_init; | ||
845 | } | ||
846 | phys_enc->hw_mdptop = hw_mdp; | ||
847 | phys_enc->intf_idx = p->intf_idx; | 805 | phys_enc->intf_idx = p->intf_idx; |
848 | 806 | ||
849 | dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); | 807 | dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); |
@@ -898,8 +856,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( | |||
898 | 856 | ||
899 | return phys_enc; | 857 | return phys_enc; |
900 | 858 | ||
901 | fail_mdp_init: | ||
902 | kfree(cmd_enc); | ||
903 | fail: | 859 | fail: |
904 | return ERR_PTR(ret); | 860 | return ERR_PTR(ret); |
905 | } | 861 | } |
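The CTL lookup removed from the command-mode mode_set above now happens once in the virtual encoder's mode_set, which iterates the reserved CTL blocks and hands hw_ctl[i] to the i-th physical encoder. A standalone sketch of that hand-out, with a simplified iterator standing in for dpu_rm_hw_iter and hypothetical struct layouts:

/*
 * Illustrative sketch of the centralized CTL assignment; not the
 * driver's resource-manager API.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_CHANNELS_PER_ENC 2

struct hw_ctl { int idx; };

struct hw_iter {
	const struct hw_ctl *blocks;
	int count;
	int pos;
};

static const struct hw_ctl *iter_next(struct hw_iter *it)
{
	return it->pos < it->count ? &it->blocks[it->pos++] : NULL;
}

int main(void)
{
	static const struct hw_ctl reserved[] = { { 0 }, { 1 } };
	struct hw_iter it = { reserved, 2, 0 };
	const struct hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };

	for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		const struct hw_ctl *ctl = iter_next(&it);
		if (!ctl)
			break;		/* fewer CTLs than channels is legal */
		hw_ctl[i] = ctl;	/* later handed to phys->hw_ctl */
	}

	for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		printf("phys %d -> ctl %d\n", i, hw_ctl[i] ? hw_ctl[i]->idx : -1);
	return 0;
}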
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 14fc7c2a6bb7..84de385a9f62 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | |||
@@ -355,13 +355,14 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) | |||
355 | 355 | ||
356 | static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc) | 356 | static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc) |
357 | { | 357 | { |
358 | struct dpu_crtc_state *dpu_cstate; | ||
359 | |||
358 | if (!phys_enc) | 360 | if (!phys_enc) |
359 | return false; | 361 | return false; |
360 | 362 | ||
361 | if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE) | 363 | dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state); |
362 | return true; | ||
363 | 364 | ||
364 | return false; | 365 | return dpu_cstate->num_ctls > 1; |
365 | } | 366 | } |
366 | 367 | ||
367 | static bool dpu_encoder_phys_vid_needs_single_flush( | 368 | static bool dpu_encoder_phys_vid_needs_single_flush( |
@@ -395,9 +396,6 @@ static void dpu_encoder_phys_vid_mode_set( | |||
395 | struct drm_display_mode *mode, | 396 | struct drm_display_mode *mode, |
396 | struct drm_display_mode *adj_mode) | 397 | struct drm_display_mode *adj_mode) |
397 | { | 398 | { |
398 | struct dpu_rm *rm; | ||
399 | struct dpu_rm_hw_iter iter; | ||
400 | int i, instance; | ||
401 | struct dpu_encoder_phys_vid *vid_enc; | 399 | struct dpu_encoder_phys_vid *vid_enc; |
402 | 400 | ||
403 | if (!phys_enc || !phys_enc->dpu_kms) { | 401 | if (!phys_enc || !phys_enc->dpu_kms) { |
@@ -405,7 +403,6 @@ static void dpu_encoder_phys_vid_mode_set( | |||
405 | return; | 403 | return; |
406 | } | 404 | } |
407 | 405 | ||
408 | rm = &phys_enc->dpu_kms->rm; | ||
409 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); | 406 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); |
410 | 407 | ||
411 | if (adj_mode) { | 408 | if (adj_mode) { |
@@ -414,21 +411,6 @@ static void dpu_encoder_phys_vid_mode_set( | |||
414 | DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n"); | 411 | DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n"); |
415 | } | 412 | } |
416 | 413 | ||
417 | instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; | ||
418 | |||
419 | /* Retrieve previously allocated HW Resources. Shouldn't fail */ | ||
420 | dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL); | ||
421 | for (i = 0; i <= instance; i++) { | ||
422 | if (dpu_rm_get_hw(rm, &iter)) | ||
423 | phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw; | ||
424 | } | ||
425 | if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { | ||
426 | DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n", | ||
427 | PTR_ERR(phys_enc->hw_ctl)); | ||
428 | phys_enc->hw_ctl = NULL; | ||
429 | return; | ||
430 | } | ||
431 | |||
432 | _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc); | 414 | _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc); |
433 | } | 415 | } |
434 | 416 | ||
@@ -481,7 +463,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) | |||
481 | { | 463 | { |
482 | struct msm_drm_private *priv; | 464 | struct msm_drm_private *priv; |
483 | struct dpu_encoder_phys_vid *vid_enc; | 465 | struct dpu_encoder_phys_vid *vid_enc; |
484 | struct dpu_hw_intf *intf; | 466 | struct dpu_rm_hw_iter iter; |
485 | struct dpu_hw_ctl *ctl; | 467 | struct dpu_hw_ctl *ctl; |
486 | u32 flush_mask = 0; | 468 | u32 flush_mask = 0; |
487 | 469 | ||
@@ -493,11 +475,20 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) | |||
493 | priv = phys_enc->parent->dev->dev_private; | 475 | priv = phys_enc->parent->dev->dev_private; |
494 | 476 | ||
495 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); | 477 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); |
496 | intf = vid_enc->hw_intf; | ||
497 | ctl = phys_enc->hw_ctl; | 478 | ctl = phys_enc->hw_ctl; |
498 | if (!vid_enc->hw_intf || !phys_enc->hw_ctl) { | 479 | |
499 | DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", | 480 | dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF); |
500 | vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0); | 481 | while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) { |
482 | struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw; | ||
483 | |||
484 | if (hw_intf->idx == phys_enc->intf_idx) { | ||
485 | vid_enc->hw_intf = hw_intf; | ||
486 | break; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | if (!vid_enc->hw_intf) { | ||
491 | DPU_ERROR("hw_intf not assigned\n"); | ||
501 | return; | 492 | return; |
502 | } | 493 | } |
503 | 494 | ||
@@ -519,7 +510,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) | |||
519 | !dpu_encoder_phys_vid_is_master(phys_enc)) | 510 | !dpu_encoder_phys_vid_is_master(phys_enc)) |
520 | goto skip_flush; | 511 | goto skip_flush; |
521 | 512 | ||
522 | ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx); | 513 | ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx); |
523 | ctl->ops.update_pending_flush(ctl, flush_mask); | 514 | ctl->ops.update_pending_flush(ctl, flush_mask); |
524 | 515 | ||
525 | skip_flush: | 516 | skip_flush: |
@@ -547,25 +538,9 @@ static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) | |||
547 | 538 | ||
548 | static void dpu_encoder_phys_vid_get_hw_resources( | 539 | static void dpu_encoder_phys_vid_get_hw_resources( |
549 | struct dpu_encoder_phys *phys_enc, | 540 | struct dpu_encoder_phys *phys_enc, |
550 | struct dpu_encoder_hw_resources *hw_res, | 541 | struct dpu_encoder_hw_resources *hw_res) |
551 | struct drm_connector_state *conn_state) | ||
552 | { | 542 | { |
553 | struct dpu_encoder_phys_vid *vid_enc; | 543 | hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO; |
554 | |||
555 | if (!phys_enc || !hw_res) { | ||
556 | DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n", | ||
557 | phys_enc != 0, hw_res != 0, conn_state != 0); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); | ||
562 | if (!vid_enc->hw_intf) { | ||
563 | DPU_ERROR("invalid arg(s), hw_intf\n"); | ||
564 | return; | ||
565 | } | ||
566 | |||
567 | DPU_DEBUG_VIDENC(vid_enc, "\n"); | ||
568 | hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO; | ||
569 | } | 544 | } |
570 | 545 | ||
571 | static int _dpu_encoder_phys_vid_wait_for_vblank( | 546 | static int _dpu_encoder_phys_vid_wait_for_vblank( |
@@ -756,32 +731,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, | |||
756 | } | 731 | } |
757 | } | 732 | } |
758 | 733 | ||
759 | static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc, | ||
760 | bool enable, u32 frame_count) | ||
761 | { | ||
762 | struct dpu_encoder_phys_vid *vid_enc; | ||
763 | |||
764 | if (!phys_enc) | ||
765 | return; | ||
766 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); | ||
767 | |||
768 | if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr) | ||
769 | vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, | ||
770 | enable, frame_count); | ||
771 | } | ||
772 | |||
773 | static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc) | ||
774 | { | ||
775 | struct dpu_encoder_phys_vid *vid_enc; | ||
776 | |||
777 | if (!phys_enc) | ||
778 | return 0; | ||
779 | vid_enc = to_dpu_encoder_phys_vid(phys_enc); | ||
780 | |||
781 | return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ? | ||
782 | vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0; | ||
783 | } | ||
784 | |||
785 | static int dpu_encoder_phys_vid_get_line_count( | 734 | static int dpu_encoder_phys_vid_get_line_count( |
786 | struct dpu_encoder_phys *phys_enc) | 735 | struct dpu_encoder_phys *phys_enc) |
787 | { | 736 | { |
@@ -817,8 +766,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) | |||
817 | ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff; | 766 | ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff; |
818 | ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff; | 767 | ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff; |
819 | ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush; | 768 | ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush; |
820 | ops->setup_misr = dpu_encoder_phys_vid_setup_misr; | ||
821 | ops->collect_misr = dpu_encoder_phys_vid_collect_misr; | ||
822 | ops->hw_reset = dpu_encoder_helper_hw_reset; | 769 | ops->hw_reset = dpu_encoder_helper_hw_reset; |
823 | ops->get_line_count = dpu_encoder_phys_vid_get_line_count; | 770 | ops->get_line_count = dpu_encoder_phys_vid_get_line_count; |
824 | } | 771 | } |
@@ -828,8 +775,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( | |||
828 | { | 775 | { |
829 | struct dpu_encoder_phys *phys_enc = NULL; | 776 | struct dpu_encoder_phys *phys_enc = NULL; |
830 | struct dpu_encoder_phys_vid *vid_enc = NULL; | 777 | struct dpu_encoder_phys_vid *vid_enc = NULL; |
831 | struct dpu_rm_hw_iter iter; | ||
832 | struct dpu_hw_mdp *hw_mdp; | ||
833 | struct dpu_encoder_irq *irq; | 778 | struct dpu_encoder_irq *irq; |
834 | int i, ret = 0; | 779 | int i, ret = 0; |
835 | 780 | ||
@@ -846,35 +791,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( | |||
846 | 791 | ||
847 | phys_enc = &vid_enc->base; | 792 | phys_enc = &vid_enc->base; |
848 | 793 | ||
849 | hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm); | 794 | phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; |
850 | if (IS_ERR_OR_NULL(hw_mdp)) { | ||
851 | ret = PTR_ERR(hw_mdp); | ||
852 | DPU_ERROR("failed to get mdptop\n"); | ||
853 | goto fail; | ||
854 | } | ||
855 | phys_enc->hw_mdptop = hw_mdp; | ||
856 | phys_enc->intf_idx = p->intf_idx; | 795 | phys_enc->intf_idx = p->intf_idx; |
857 | 796 | ||
858 | /** | ||
859 | * hw_intf resource permanently assigned to this encoder | ||
860 | * Other resources allocated at atomic commit time by use case | ||
861 | */ | ||
862 | dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF); | ||
863 | while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) { | ||
864 | struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw; | ||
865 | |||
866 | if (hw_intf->idx == p->intf_idx) { | ||
867 | vid_enc->hw_intf = hw_intf; | ||
868 | break; | ||
869 | } | ||
870 | } | ||
871 | |||
872 | if (!vid_enc->hw_intf) { | ||
873 | ret = -EINVAL; | ||
874 | DPU_ERROR("failed to get hw_intf\n"); | ||
875 | goto fail; | ||
876 | } | ||
877 | |||
878 | DPU_DEBUG_VIDENC(vid_enc, "\n"); | 797 | DPU_DEBUG_VIDENC(vid_enc, "\n"); |
879 | 798 | ||
880 | dpu_encoder_phys_vid_init_ops(&phys_enc->ops); | 799 | dpu_encoder_phys_vid_init_ops(&phys_enc->ops); |
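The video encoder no longer caches its hw_intf at init time by scanning the resource manager; it resolves the INTF block at enable time by matching the encoder's intf_idx against the reserved interface blocks. A minimal standalone sketch of that matching step, assuming simplified stand-in types rather than the real iterator:

/*
 * Illustrative only: pick the reserved interface block whose hardware
 * index matches the encoder's intf_idx, with an early-return error path.
 */
#include <stdio.h>
#include <stddef.h>

struct hw_intf { int idx; };

int main(void)
{
	static const struct hw_intf reserved[] = { { 1 }, { 2 } };
	const int intf_idx = 2;			/* index owned by this phys encoder */
	const struct hw_intf *hw_intf = NULL;

	for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		if (reserved[i].idx == intf_idx) {
			hw_intf = &reserved[i];
			break;
		}
	}

	if (!hw_intf) {
		fprintf(stderr, "hw_intf not assigned\n");
		return 1;			/* mirrors the early-return on error */
	}
	printf("using intf %d\n", hw_intf->idx);
	return 0;
}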
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 44ee06398b1d..512ac0834d2b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | |||
@@ -29,6 +29,9 @@ | |||
29 | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ | 29 | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ |
30 | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) | 30 | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) |
31 | 31 | ||
32 | #define DMA_CURSOR_SDM845_MASK \ | ||
33 | (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR)) | ||
34 | |||
32 | #define MIXER_SDM845_MASK \ | 35 | #define MIXER_SDM845_MASK \ |
33 | (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) | 36 | (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) |
34 | 37 | ||
@@ -71,7 +74,6 @@ static struct dpu_mdp_cfg sdm845_mdp[] = { | |||
71 | .base = 0x0, .len = 0x45C, | 74 | .base = 0x0, .len = 0x45C, |
72 | .features = 0, | 75 | .features = 0, |
73 | .highest_bank_bit = 0x2, | 76 | .highest_bank_bit = 0x2, |
74 | .has_dest_scaler = true, | ||
75 | .clk_ctrls[DPU_CLK_CTRL_VIG0] = { | 77 | .clk_ctrls[DPU_CLK_CTRL_VIG0] = { |
76 | .reg_off = 0x2AC, .bit_off = 0}, | 78 | .reg_off = 0x2AC, .bit_off = 0}, |
77 | .clk_ctrls[DPU_CLK_CTRL_VIG1] = { | 79 | .clk_ctrls[DPU_CLK_CTRL_VIG1] = { |
@@ -174,45 +176,35 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2); | |||
174 | static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3); | 176 | static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3); |
175 | static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4); | 177 | static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4); |
176 | 178 | ||
177 | #define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \ | 179 | #define SSPP_BLK(_name, _id, _base, _features, \ |
178 | { \ | 180 | _sblk, _xinid, _type, _clkctrl) \ |
179 | .name = _name, .id = _id, \ | ||
180 | .base = _base, .len = 0x1c8, \ | ||
181 | .features = VIG_SDM845_MASK, \ | ||
182 | .sblk = &_sblk, \ | ||
183 | .xin_id = _xinid, \ | ||
184 | .type = SSPP_TYPE_VIG, \ | ||
185 | .clk_ctrl = _clkctrl \ | ||
186 | } | ||
187 | |||
188 | #define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \ | ||
189 | { \ | 181 | { \ |
190 | .name = _name, .id = _id, \ | 182 | .name = _name, .id = _id, \ |
191 | .base = _base, .len = 0x1c8, \ | 183 | .base = _base, .len = 0x1c8, \ |
192 | .features = DMA_SDM845_MASK, \ | 184 | .features = _features, \ |
193 | .sblk = &_sblk, \ | 185 | .sblk = &_sblk, \ |
194 | .xin_id = _xinid, \ | 186 | .xin_id = _xinid, \ |
195 | .type = SSPP_TYPE_DMA, \ | 187 | .type = _type, \ |
196 | .clk_ctrl = _clkctrl \ | 188 | .clk_ctrl = _clkctrl \ |
197 | } | 189 | } |
198 | 190 | ||
199 | static struct dpu_sspp_cfg sdm845_sspp[] = { | 191 | static struct dpu_sspp_cfg sdm845_sspp[] = { |
200 | SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000, | 192 | SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK, |
201 | sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0), | 193 | sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), |
202 | SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000, | 194 | SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK, |
203 | sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1), | 195 | sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), |
204 | SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000, | 196 | SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK, |
205 | sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2), | 197 | sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), |
206 | SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000, | 198 | SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK, |
207 | sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3), | 199 | sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), |
208 | SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000, | 200 | SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, |
209 | sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0), | 201 | sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), |
210 | SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000, | 202 | SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, |
211 | sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1), | 203 | sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), |
212 | SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000, | 204 | SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, |
213 | sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0), | 205 | sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), |
214 | SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000, | 206 | SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, |
215 | sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1), | 207 | sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), |
216 | }; | 208 | }; |
217 | 209 | ||
218 | /************************************************************* | 210 | /************************************************************* |
@@ -227,48 +219,23 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = { | |||
227 | }, | 219 | }, |
228 | }; | 220 | }; |
229 | 221 | ||
230 | #define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \ | 222 | #define LM_BLK(_name, _id, _base, _pp, _lmpair) \ |
231 | { \ | 223 | { \ |
232 | .name = _name, .id = _id, \ | 224 | .name = _name, .id = _id, \ |
233 | .base = _base, .len = 0x320, \ | 225 | .base = _base, .len = 0x320, \ |
234 | .features = MIXER_SDM845_MASK, \ | 226 | .features = MIXER_SDM845_MASK, \ |
235 | .sblk = &sdm845_lm_sblk, \ | 227 | .sblk = &sdm845_lm_sblk, \ |
236 | .ds = _ds, \ | ||
237 | .pingpong = _pp, \ | 228 | .pingpong = _pp, \ |
238 | .lm_pair_mask = (1 << _lmpair) \ | 229 | .lm_pair_mask = (1 << _lmpair) \ |
239 | } | 230 | } |
240 | 231 | ||
241 | static struct dpu_lm_cfg sdm845_lm[] = { | 232 | static struct dpu_lm_cfg sdm845_lm[] = { |
242 | LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1), | 233 | LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1), |
243 | LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0), | 234 | LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0), |
244 | LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5), | 235 | LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5), |
245 | LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0), | 236 | LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0), |
246 | LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0), | 237 | LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0), |
247 | LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2), | 238 | LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2), |
248 | }; | ||
249 | |||
250 | /************************************************************* | ||
251 | * DS sub blocks config | ||
252 | *************************************************************/ | ||
253 | static const struct dpu_ds_top_cfg sdm845_ds_top = { | ||
254 | .name = "ds_top_0", .id = DS_TOP, | ||
255 | .base = 0x60000, .len = 0xc, | ||
256 | .maxinputwidth = DEFAULT_DPU_LINE_WIDTH, | ||
257 | .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, | ||
258 | .maxupscale = MAX_UPSCALE_RATIO, | ||
259 | }; | ||
260 | |||
261 | #define DS_BLK(_name, _id, _base) \ | ||
262 | {\ | ||
263 | .name = _name, .id = _id, \ | ||
264 | .base = _base, .len = 0x800, \ | ||
265 | .features = DPU_SSPP_SCALER_QSEED3, \ | ||
266 | .top = &sdm845_ds_top \ | ||
267 | } | ||
268 | |||
269 | static struct dpu_ds_cfg sdm845_ds[] = { | ||
270 | DS_BLK("ds_0", DS_0, 0x800), | ||
271 | DS_BLK("ds_1", DS_1, 0x1000), | ||
272 | }; | 239 | }; |
273 | 240 | ||
274 | /************************************************************* | 241 | /************************************************************* |
@@ -328,18 +295,6 @@ static struct dpu_intf_cfg sdm845_intf[] = { | |||
328 | }; | 295 | }; |
329 | 296 | ||
330 | /************************************************************* | 297 | /************************************************************* |
331 | * CDM sub blocks config | ||
332 | *************************************************************/ | ||
333 | static struct dpu_cdm_cfg sdm845_cdm[] = { | ||
334 | { | ||
335 | .name = "cdm_0", .id = CDM_0, | ||
336 | .base = 0x79200, .len = 0x224, | ||
337 | .features = 0, | ||
338 | .intf_connect = BIT(INTF_3), | ||
339 | }, | ||
340 | }; | ||
341 | |||
342 | /************************************************************* | ||
343 | * VBIF sub blocks config | 298 | * VBIF sub blocks config |
344 | *************************************************************/ | 299 | *************************************************************/ |
345 | /* VBIF QOS remap */ | 300 | /* VBIF QOS remap */ |
@@ -461,12 +416,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg) | |||
461 | .sspp = sdm845_sspp, | 416 | .sspp = sdm845_sspp, |
462 | .mixer_count = ARRAY_SIZE(sdm845_lm), | 417 | .mixer_count = ARRAY_SIZE(sdm845_lm), |
463 | .mixer = sdm845_lm, | 418 | .mixer = sdm845_lm, |
464 | .ds_count = ARRAY_SIZE(sdm845_ds), | ||
465 | .ds = sdm845_ds, | ||
466 | .pingpong_count = ARRAY_SIZE(sdm845_pp), | 419 | .pingpong_count = ARRAY_SIZE(sdm845_pp), |
467 | .pingpong = sdm845_pp, | 420 | .pingpong = sdm845_pp, |
468 | .cdm_count = ARRAY_SIZE(sdm845_cdm), | ||
469 | .cdm = sdm845_cdm, | ||
470 | .intf_count = ARRAY_SIZE(sdm845_intf), | 421 | .intf_count = ARRAY_SIZE(sdm845_intf), |
471 | .intf = sdm845_intf, | 422 | .intf = sdm845_intf, |
472 | .vbif_count = ARRAY_SIZE(sdm845_vbif), | 423 | .vbif_count = ARRAY_SIZE(sdm845_vbif), |
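The catalog change above folds the separate VIG/DMA macros into one SSPP_BLK macro that takes the feature mask and pipe type as parameters, so the cursor-capable DMA pipes become plain DMA entries with an extra feature bit OR'd in. A compact standalone sketch of that table-driven pattern, using simplified field names and made-up feature bits rather than the driver's catalog definitions:

/*
 * Illustrative sketch of a single-macro hardware catalog entry; masks,
 * fields and names are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

#define BIT(n)			(1u << (n))
#define FEAT_SCALER		BIT(0)
#define FEAT_CURSOR		BIT(1)

#define DMA_MASK		0u
#define DMA_CURSOR_MASK		(DMA_MASK | FEAT_CURSOR)
#define VIG_MASK		FEAT_SCALER

enum sspp_type { SSPP_TYPE_VIG, SSPP_TYPE_DMA };

struct sspp_cfg {
	const char *name;
	unsigned int features;
	enum sspp_type type;
};

#define SSPP_BLK(_name, _features, _type) \
	{ .name = _name, .features = _features, .type = _type }

static const struct sspp_cfg sspp[] = {
	SSPP_BLK("sspp_0",  VIG_MASK,        SSPP_TYPE_VIG),
	SSPP_BLK("sspp_8",  DMA_MASK,        SSPP_TYPE_DMA),
	SSPP_BLK("sspp_10", DMA_CURSOR_MASK, SSPP_TYPE_DMA),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(sspp) / sizeof(sspp[0]); i++)
		printf("%s: type=%d cursor=%s\n", sspp[i].name, sspp[i].type,
		       (sspp[i].features & FEAT_CURSOR) ? "yes" : "no");
	return 0;
}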
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index f0cb0d4fc80e..dc060e7358e4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | |||
@@ -428,7 +428,6 @@ struct dpu_clk_ctrl_reg { | |||
428 | * @highest_bank_bit: UBWC parameter | 428 | * @highest_bank_bit: UBWC parameter |
429 | * @ubwc_static: ubwc static configuration | 429 | * @ubwc_static: ubwc static configuration |
430 | * @ubwc_swizzle: ubwc default swizzle setting | 430 | * @ubwc_swizzle: ubwc default swizzle setting |
431 | * @has_dest_scaler: indicates support of destination scaler | ||
432 | * @clk_ctrls clock control register definition | 431 | * @clk_ctrls clock control register definition |
433 | */ | 432 | */ |
434 | struct dpu_mdp_cfg { | 433 | struct dpu_mdp_cfg { |
@@ -436,7 +435,6 @@ struct dpu_mdp_cfg { | |||
436 | u32 highest_bank_bit; | 435 | u32 highest_bank_bit; |
437 | u32 ubwc_static; | 436 | u32 ubwc_static; |
438 | u32 ubwc_swizzle; | 437 | u32 ubwc_swizzle; |
439 | bool has_dest_scaler; | ||
440 | struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX]; | 438 | struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX]; |
441 | }; | 439 | }; |
442 | 440 | ||
@@ -474,50 +472,16 @@ struct dpu_sspp_cfg { | |||
474 | * @features bit mask identifying sub-blocks/features | 472 | * @features bit mask identifying sub-blocks/features |
475 | * @sblk: LM Sub-blocks information | 473 | * @sblk: LM Sub-blocks information |
476 | * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported | 474 | * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported |
477 | * @ds: ID of connected DS, DS_MAX if unsupported | ||
478 | * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL | 475 | * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL |
479 | */ | 476 | */ |
480 | struct dpu_lm_cfg { | 477 | struct dpu_lm_cfg { |
481 | DPU_HW_BLK_INFO; | 478 | DPU_HW_BLK_INFO; |
482 | const struct dpu_lm_sub_blks *sblk; | 479 | const struct dpu_lm_sub_blks *sblk; |
483 | u32 pingpong; | 480 | u32 pingpong; |
484 | u32 ds; | ||
485 | unsigned long lm_pair_mask; | 481 | unsigned long lm_pair_mask; |
486 | }; | 482 | }; |
487 | 483 | ||
488 | /** | 484 | /** |
489 | * struct dpu_ds_top_cfg - information of dest scaler top | ||
490 | * @id enum identifying this block | ||
491 | * @base register offset of this block | ||
492 | * @features bit mask identifying features | ||
493 | * @version hw version of dest scaler | ||
494 | * @maxinputwidth maximum input line width | ||
495 | * @maxoutputwidth maximum output line width | ||
496 | * @maxupscale maximum upscale ratio | ||
497 | */ | ||
498 | struct dpu_ds_top_cfg { | ||
499 | DPU_HW_BLK_INFO; | ||
500 | u32 version; | ||
501 | u32 maxinputwidth; | ||
502 | u32 maxoutputwidth; | ||
503 | u32 maxupscale; | ||
504 | }; | ||
505 | |||
506 | /** | ||
507 | * struct dpu_ds_cfg - information of dest scaler blocks | ||
508 | * @id enum identifying this block | ||
509 | * @base register offset wrt DS top offset | ||
510 | * @features bit mask identifying features | ||
511 | * @version hw version of the qseed block | ||
512 | * @top DS top information | ||
513 | */ | ||
514 | struct dpu_ds_cfg { | ||
515 | DPU_HW_BLK_INFO; | ||
516 | u32 version; | ||
517 | const struct dpu_ds_top_cfg *top; | ||
518 | }; | ||
519 | |||
520 | /** | ||
521 | * struct dpu_pingpong_cfg - information of PING-PONG blocks | 485 | * struct dpu_pingpong_cfg - information of PING-PONG blocks |
522 | * @id enum identifying this block | 486 | * @id enum identifying this block |
523 | * @base register offset of this block | 487 | * @base register offset of this block |
@@ -530,18 +494,6 @@ struct dpu_pingpong_cfg { | |||
530 | }; | 494 | }; |
531 | 495 | ||
532 | /** | 496 | /** |
533 | * struct dpu_cdm_cfg - information of chroma down blocks | ||
534 | * @id enum identifying this block | ||
535 | * @base register offset of this block | ||
536 | * @features bit mask identifying sub-blocks/features | ||
537 | * @intf_connect Bitmask of INTF IDs this CDM can connect to | ||
538 | */ | ||
539 | struct dpu_cdm_cfg { | ||
540 | DPU_HW_BLK_INFO; | ||
541 | unsigned long intf_connect; | ||
542 | }; | ||
543 | |||
544 | /** | ||
545 | * struct dpu_intf_cfg - information of timing engine blocks | 497 | * struct dpu_intf_cfg - information of timing engine blocks |
546 | * @id enum identifying this block | 498 | * @id enum identifying this block |
547 | * @base register offset of this block | 499 | * @base register offset of this block |
@@ -728,15 +680,9 @@ struct dpu_mdss_cfg { | |||
728 | u32 mixer_count; | 680 | u32 mixer_count; |
729 | struct dpu_lm_cfg *mixer; | 681 | struct dpu_lm_cfg *mixer; |
730 | 682 | ||
731 | u32 ds_count; | ||
732 | struct dpu_ds_cfg *ds; | ||
733 | |||
734 | u32 pingpong_count; | 683 | u32 pingpong_count; |
735 | struct dpu_pingpong_cfg *pingpong; | 684 | struct dpu_pingpong_cfg *pingpong; |
736 | 685 | ||
737 | u32 cdm_count; | ||
738 | struct dpu_cdm_cfg *cdm; | ||
739 | |||
740 | u32 intf_count; | 686 | u32 intf_count; |
741 | struct dpu_intf_cfg *intf; | 687 | struct dpu_intf_cfg *intf; |
742 | 688 | ||
@@ -771,9 +717,7 @@ struct dpu_mdss_hw_cfg_handler { | |||
771 | #define BLK_DMA(s) ((s)->dma) | 717 | #define BLK_DMA(s) ((s)->dma) |
772 | #define BLK_CURSOR(s) ((s)->cursor) | 718 | #define BLK_CURSOR(s) ((s)->cursor) |
773 | #define BLK_MIXER(s) ((s)->mixer) | 719 | #define BLK_MIXER(s) ((s)->mixer) |
774 | #define BLK_DS(s) ((s)->ds) | ||
775 | #define BLK_PINGPONG(s) ((s)->pingpong) | 720 | #define BLK_PINGPONG(s) ((s)->pingpong) |
776 | #define BLK_CDM(s) ((s)->cdm) | ||
777 | #define BLK_INTF(s) ((s)->intf) | 721 | #define BLK_INTF(s) ((s)->intf) |
778 | #define BLK_AD(s) ((s)->ad) | 722 | #define BLK_AD(s) ((s)->ad) |
779 | 723 | ||
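The catalog keeps the same count-plus-array convention for every block type that survives this cleanup (mixers, pingpongs, interfaces), so lookups work the same way whichever arrays are dropped. Below is a minimal userspace sketch of that lookup-by-id pattern; the struct fields and register offsets are placeholders, not the real dpu_mdss_cfg layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed-down stand-ins for catalog entries; the real
 * structs carry DPU_HW_BLK_INFO (id/base/len/features) plus sub-blocks. */
struct blk_cfg { uint32_t id; uint32_t base; };

struct mdss_cfg {
    uint32_t mixer_count;    const struct blk_cfg *mixer;
    uint32_t pingpong_count; const struct blk_cfg *pingpong;
    uint32_t intf_count;     const struct blk_cfg *intf;
};

/* Same lookup-by-id walk the _lm_offset()/_intf_offset() helpers shown
 * later in this diff perform on the real catalog arrays. */
static const struct blk_cfg *find_blk(const struct blk_cfg *arr,
                                      uint32_t count, uint32_t id)
{
    for (uint32_t i = 0; i < count; i++)
        if (arr[i].id == id)
            return &arr[i];
    return NULL;
}

int main(void)
{
    static const struct blk_cfg mixers[] = { { 1, 0x44000 }, { 2, 0x45000 } };
    struct mdss_cfg cfg = { .mixer_count = 2, .mixer = mixers };
    const struct blk_cfg *lm = find_blk(cfg.mixer, cfg.mixer_count, 2);

    if (lm)
        printf("LM id %u at 0x%x\n", (unsigned)lm->id, (unsigned)lm->base);
    return 0;
}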
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c deleted file mode 100644 index 554874ba0c3b..000000000000 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ /dev/null | |||
@@ -1,323 +0,0 @@ | |||
1 | /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #include "dpu_hw_mdss.h" | ||
14 | #include "dpu_hwio.h" | ||
15 | #include "dpu_hw_catalog.h" | ||
16 | #include "dpu_hw_cdm.h" | ||
17 | #include "dpu_dbg.h" | ||
18 | #include "dpu_kms.h" | ||
19 | |||
20 | #define CDM_CSC_10_OPMODE 0x000 | ||
21 | #define CDM_CSC_10_BASE 0x004 | ||
22 | |||
23 | #define CDM_CDWN2_OP_MODE 0x100 | ||
24 | #define CDM_CDWN2_CLAMP_OUT 0x104 | ||
25 | #define CDM_CDWN2_PARAMS_3D_0 0x108 | ||
26 | #define CDM_CDWN2_PARAMS_3D_1 0x10C | ||
27 | #define CDM_CDWN2_COEFF_COSITE_H_0 0x110 | ||
28 | #define CDM_CDWN2_COEFF_COSITE_H_1 0x114 | ||
29 | #define CDM_CDWN2_COEFF_COSITE_H_2 0x118 | ||
30 | #define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C | ||
31 | #define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120 | ||
32 | #define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124 | ||
33 | #define CDM_CDWN2_COEFF_COSITE_V 0x128 | ||
34 | #define CDM_CDWN2_COEFF_OFFSITE_V 0x12C | ||
35 | #define CDM_CDWN2_OUT_SIZE 0x130 | ||
36 | |||
37 | #define CDM_HDMI_PACK_OP_MODE 0x200 | ||
38 | #define CDM_CSC_10_MATRIX_COEFF_0 0x004 | ||
39 | |||
40 | /** | ||
41 | * Horizontal coefficients for cosite chroma downscale | ||
42 | * s13 representation of coefficients | ||
43 | */ | ||
44 | static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e}; | ||
45 | |||
46 | /** | ||
47 | * Horizontal coefficients for offsite chroma downscale | ||
48 | */ | ||
49 | static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046}; | ||
50 | |||
51 | /** | ||
52 | * Vertical coefficients for cosite chroma downscale | ||
53 | */ | ||
54 | static u32 cosite_v_coeff[] = {0x00080004}; | ||
55 | /** | ||
56 | * Vertical coefficients for offsite chroma downscale | ||
57 | */ | ||
58 | static u32 offsite_v_coeff[] = {0x00060002}; | ||
59 | |||
60 | /* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */ | ||
61 | static struct dpu_csc_cfg rgb2yuv_cfg = { | ||
62 | { | ||
63 | 0x0083, 0x0102, 0x0032, | ||
64 | 0x1fb5, 0x1f6c, 0x00e1, | ||
65 | 0x00e1, 0x1f45, 0x1fdc | ||
66 | }, | ||
67 | { 0x00, 0x00, 0x00 }, | ||
68 | { 0x0040, 0x0200, 0x0200 }, | ||
69 | { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, | ||
70 | { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, | ||
71 | }; | ||
72 | |||
73 | static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm, | ||
74 | struct dpu_mdss_cfg *m, | ||
75 | void __iomem *addr, | ||
76 | struct dpu_hw_blk_reg_map *b) | ||
77 | { | ||
78 | int i; | ||
79 | |||
80 | for (i = 0; i < m->cdm_count; i++) { | ||
81 | if (cdm == m->cdm[i].id) { | ||
82 | b->base_off = addr; | ||
83 | b->blk_off = m->cdm[i].base; | ||
84 | b->length = m->cdm[i].len; | ||
85 | b->hwversion = m->hwversion; | ||
86 | b->log_mask = DPU_DBG_MASK_CDM; | ||
87 | return &m->cdm[i]; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | return ERR_PTR(-EINVAL); | ||
92 | } | ||
93 | |||
94 | static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx, | ||
95 | struct dpu_csc_cfg *data) | ||
96 | { | ||
97 | dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, | ||
103 | struct dpu_hw_cdm_cfg *cfg) | ||
104 | { | ||
105 | struct dpu_hw_blk_reg_map *c = &ctx->hw; | ||
106 | u32 opmode = 0; | ||
107 | u32 out_size = 0; | ||
108 | |||
109 | if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT) | ||
110 | opmode &= ~BIT(7); | ||
111 | else | ||
112 | opmode |= BIT(7); | ||
113 | |||
114 | /* ENABLE DWNS_H bit */ | ||
115 | opmode |= BIT(1); | ||
116 | |||
117 | switch (cfg->h_cdwn_type) { | ||
118 | case CDM_CDWN_DISABLE: | ||
119 | /* CLEAR METHOD_H field */ | ||
120 | opmode &= ~(0x18); | ||
121 | /* CLEAR DWNS_H bit */ | ||
122 | opmode &= ~BIT(1); | ||
123 | break; | ||
124 | case CDM_CDWN_PIXEL_DROP: | ||
125 | /* Clear METHOD_H field (pixel drop is 0) */ | ||
126 | opmode &= ~(0x18); | ||
127 | break; | ||
128 | case CDM_CDWN_AVG: | ||
129 | /* Clear METHOD_H field (Average is 0x1) */ | ||
130 | opmode &= ~(0x18); | ||
131 | opmode |= (0x1 << 0x3); | ||
132 | break; | ||
133 | case CDM_CDWN_COSITE: | ||
134 | /* Clear METHOD_H field (Cosite is 0x2) */ | ||
135 | opmode &= ~(0x18); | ||
136 | opmode |= (0x2 << 0x3); | ||
137 | /* Co-site horizontal coefficients */ | ||
138 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0, | ||
139 | cosite_h_coeff[0]); | ||
140 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1, | ||
141 | cosite_h_coeff[1]); | ||
142 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2, | ||
143 | cosite_h_coeff[2]); | ||
144 | break; | ||
145 | case CDM_CDWN_OFFSITE: | ||
146 | /* Clear METHOD_H field (Offsite is 0x3) */ | ||
147 | opmode &= ~(0x18); | ||
148 | opmode |= (0x3 << 0x3); | ||
149 | |||
150 | /* Off-site horizontal coefficients */ | ||
151 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0, | ||
152 | offsite_h_coeff[0]); | ||
153 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1, | ||
154 | offsite_h_coeff[1]); | ||
155 | DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2, | ||
156 | offsite_h_coeff[2]); | ||
157 | break; | ||
158 | default: | ||
159 | pr_err("%s invalid horz down sampling type\n", __func__); | ||
160 | return -EINVAL; | ||
161 | } | ||
162 | |||
163 | /* ENABLE DWNS_V bit */ | ||
164 | opmode |= BIT(2); | ||
165 | |||
166 | switch (cfg->v_cdwn_type) { | ||
167 | case CDM_CDWN_DISABLE: | ||
168 | /* CLEAR METHOD_V field */ | ||
169 | opmode &= ~(0x60); | ||
170 | /* CLEAR DWNS_V bit */ | ||
171 | opmode &= ~BIT(2); | ||
172 | break; | ||
173 | case CDM_CDWN_PIXEL_DROP: | ||
174 | /* Clear METHOD_V field (pixel drop is 0) */ | ||
175 | opmode &= ~(0x60); | ||
176 | break; | ||
177 | case CDM_CDWN_AVG: | ||
178 | /* Clear METHOD_V field (Average is 0x1) */ | ||
179 | opmode &= ~(0x60); | ||
180 | opmode |= (0x1 << 0x5); | ||
181 | break; | ||
182 | case CDM_CDWN_COSITE: | ||
183 | /* Clear METHOD_V field (Cosite is 0x2) */ | ||
184 | opmode &= ~(0x60); | ||
185 | opmode |= (0x2 << 0x5); | ||
186 | /* Co-site vertical coefficients */ | ||
187 | DPU_REG_WRITE(c, | ||
188 | CDM_CDWN2_COEFF_COSITE_V, | ||
189 | cosite_v_coeff[0]); | ||
190 | break; | ||
191 | case CDM_CDWN_OFFSITE: | ||
192 | /* Clear METHOD_V field (Offsite is 0x3) */ | ||
193 | opmode &= ~(0x60); | ||
194 | opmode |= (0x3 << 0x5); | ||
195 | |||
196 | /* Off-site vertical coefficients */ | ||
197 | DPU_REG_WRITE(c, | ||
198 | CDM_CDWN2_COEFF_OFFSITE_V, | ||
199 | offsite_v_coeff[0]); | ||
200 | break; | ||
201 | default: | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | |||
205 | if (cfg->v_cdwn_type || cfg->h_cdwn_type) | ||
206 | opmode |= BIT(0); /* EN CDWN module */ | ||
207 | else | ||
208 | opmode &= ~BIT(0); | ||
209 | |||
210 | out_size = (cfg->output_width & 0xFFFF) | | ||
211 | ((cfg->output_height & 0xFFFF) << 16); | ||
212 | DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size); | ||
213 | DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode); | ||
214 | DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, | ||
215 | ((0x3FF << 16) | 0x0)); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, | ||
221 | struct dpu_hw_cdm_cfg *cdm) | ||
222 | { | ||
223 | struct dpu_hw_blk_reg_map *c = &ctx->hw; | ||
224 | const struct dpu_format *fmt = cdm->output_fmt; | ||
225 | struct cdm_output_cfg cdm_cfg = { 0 }; | ||
226 | u32 opmode = 0; | ||
227 | u32 csc = 0; | ||
228 | |||
229 | if (!DPU_FORMAT_IS_YUV(fmt)) | ||
230 | return -EINVAL; | ||
231 | |||
232 | if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { | ||
233 | if (fmt->chroma_sample != DPU_CHROMA_H1V2) | ||
234 | return -EINVAL; /* unsupported format */ | ||
235 | opmode = BIT(0); | ||
236 | opmode |= (fmt->chroma_sample << 1); | ||
237 | cdm_cfg.intf_en = true; | ||
238 | } | ||
239 | |||
240 | csc |= BIT(2); | ||
241 | csc &= ~BIT(1); | ||
242 | csc |= BIT(0); | ||
243 | |||
244 | if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output) | ||
245 | ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg); | ||
246 | |||
247 | DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc); | ||
248 | DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx) | ||
253 | { | ||
254 | struct cdm_output_cfg cdm_cfg = { 0 }; | ||
255 | |||
256 | if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output) | ||
257 | ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg); | ||
258 | } | ||
259 | |||
260 | static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops, | ||
261 | unsigned long features) | ||
262 | { | ||
263 | ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit; | ||
264 | ops->setup_cdwn = dpu_hw_cdm_setup_cdwn; | ||
265 | ops->enable = dpu_hw_cdm_enable; | ||
266 | ops->disable = dpu_hw_cdm_disable; | ||
267 | } | ||
268 | |||
269 | static struct dpu_hw_blk_ops dpu_hw_ops = { | ||
270 | .start = NULL, | ||
271 | .stop = NULL, | ||
272 | }; | ||
273 | |||
274 | struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx, | ||
275 | void __iomem *addr, | ||
276 | struct dpu_mdss_cfg *m, | ||
277 | struct dpu_hw_mdp *hw_mdp) | ||
278 | { | ||
279 | struct dpu_hw_cdm *c; | ||
280 | struct dpu_cdm_cfg *cfg; | ||
281 | int rc; | ||
282 | |||
283 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
284 | if (!c) | ||
285 | return ERR_PTR(-ENOMEM); | ||
286 | |||
287 | cfg = _cdm_offset(idx, m, addr, &c->hw); | ||
288 | if (IS_ERR_OR_NULL(cfg)) { | ||
289 | kfree(c); | ||
290 | return ERR_PTR(-EINVAL); | ||
291 | } | ||
292 | |||
293 | c->idx = idx; | ||
294 | c->caps = cfg; | ||
295 | _setup_cdm_ops(&c->ops, c->caps->features); | ||
296 | c->hw_mdp = hw_mdp; | ||
297 | |||
298 | rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops); | ||
299 | if (rc) { | ||
300 | DPU_ERROR("failed to init hw blk %d\n", rc); | ||
301 | goto blk_init_error; | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * Perform any default initialization for the chroma down module | ||
306 | * - set up the default CSC coefficients | ||
307 | */ | ||
308 | dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg); | ||
309 | |||
310 | return c; | ||
311 | |||
312 | blk_init_error: | ||
313 | kzfree(c); | ||
314 | |||
315 | return ERR_PTR(rc); | ||
316 | } | ||
317 | |||
318 | void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm) | ||
319 | { | ||
320 | if (cdm) | ||
321 | dpu_hw_blk_destroy(&cdm->base); | ||
322 | kfree(cdm); | ||
323 | } | ||
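For readers following the register programming removed above, the sketch below models how dpu_hw_cdm_setup_cdwn() packed the CDM_CDWN2_OP_MODE word. It is a standalone userspace model, assuming only the field layout the deleted code implies (EN in bit 0, DWNS_H/DWNS_V in bits 1 and 2, METHOD_H in bits 3-4, METHOD_V in bits 5-6, 8-bit output flag in bit 7); it is not driver code:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Mirrors enum dpu_hw_cdwn_type from the removed header. */
enum cdwn_type { CDWN_DISABLE, CDWN_PIXEL_DROP, CDWN_AVG, CDWN_COSITE, CDWN_OFFSITE };

/*
 * CDM_CDWN2_OP_MODE layout as programmed by the removed setup_cdwn():
 *   bit 0     EN        (set when either direction downsamples)
 *   bit 1     DWNS_H    (horizontal downscale enable)
 *   bit 2     DWNS_V    (vertical downscale enable)
 *   bits 3-4  METHOD_H  (0 pixel drop, 1 average, 2 cosite, 3 offsite)
 *   bits 5-6  METHOD_V  (same encoding)
 *   bit 7     output is 8-bit (cleared for 10-bit output)
 */
static uint32_t cdwn2_opmode(enum cdwn_type h, enum cdwn_type v, int out_10bit)
{
    uint32_t op = 0;

    if (!out_10bit)
        op |= BIT(7);
    if (h != CDWN_DISABLE)
        op |= BIT(1) | ((uint32_t)(h - 1) << 3);
    if (v != CDWN_DISABLE)
        op |= BIT(2) | ((uint32_t)(v - 1) << 5);
    if (h != CDWN_DISABLE || v != CDWN_DISABLE)
        op |= BIT(0);
    return op;
}

int main(void)
{
    /* Cosite horizontal plus offsite vertical downscale, 8-bit output. */
    printf("OP_MODE = 0x%02x\n", (unsigned)cdwn2_opmode(CDWN_COSITE, CDWN_OFFSITE, 0));
    return 0;
}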
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h deleted file mode 100644 index 5cceb1ecb8e0..000000000000 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h +++ /dev/null | |||
@@ -1,139 +0,0 @@ | |||
1 | /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #ifndef _DPU_HW_CDM_H | ||
14 | #define _DPU_HW_CDM_H | ||
15 | |||
16 | #include "dpu_hw_mdss.h" | ||
17 | #include "dpu_hw_top.h" | ||
18 | #include "dpu_hw_blk.h" | ||
19 | |||
20 | struct dpu_hw_cdm; | ||
21 | |||
22 | struct dpu_hw_cdm_cfg { | ||
23 | u32 output_width; | ||
24 | u32 output_height; | ||
25 | u32 output_bit_depth; | ||
26 | u32 h_cdwn_type; | ||
27 | u32 v_cdwn_type; | ||
28 | const struct dpu_format *output_fmt; | ||
29 | u32 output_type; | ||
30 | int flags; | ||
31 | }; | ||
32 | |||
33 | enum dpu_hw_cdwn_type { | ||
34 | CDM_CDWN_DISABLE, | ||
35 | CDM_CDWN_PIXEL_DROP, | ||
36 | CDM_CDWN_AVG, | ||
37 | CDM_CDWN_COSITE, | ||
38 | CDM_CDWN_OFFSITE, | ||
39 | }; | ||
40 | |||
41 | enum dpu_hw_cdwn_output_type { | ||
42 | CDM_CDWN_OUTPUT_HDMI, | ||
43 | CDM_CDWN_OUTPUT_WB, | ||
44 | }; | ||
45 | |||
46 | enum dpu_hw_cdwn_output_bit_depth { | ||
47 | CDM_CDWN_OUTPUT_8BIT, | ||
48 | CDM_CDWN_OUTPUT_10BIT, | ||
49 | }; | ||
50 | |||
51 | /** | ||
52 | * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions. | ||
53 | * These functions are assumed to be called only after the | ||
54 | * required clocks have been enabled. | ||
55 | * @setup_csc: Programs the csc matrix | ||
56 | * @setup_cdwn: Sets up the chroma down sub module | ||
57 | * @enable: Enables the output to interface and programs the | ||
58 | * output packer | ||
59 | * @disable: Puts the cdm in bypass mode | ||
60 | */ | ||
61 | struct dpu_hw_cdm_ops { | ||
62 | /** | ||
63 | * Programs the CSC matrix for conversion from RGB space to YUV space. | ||
64 | * Calling this function is optional: a default matrix is programmed | ||
65 | * automatically during initialization, so call it only when a matrix | ||
66 | * other than the default is required. | ||
67 | * @cdm: Pointer to the chroma down context structure | ||
68 | * @data Pointer to CSC configuration data | ||
69 | * return: 0 if success; error code otherwise | ||
70 | */ | ||
71 | int (*setup_csc_data)(struct dpu_hw_cdm *cdm, | ||
72 | struct dpu_csc_cfg *data); | ||
73 | |||
74 | /** | ||
75 | * Programs the Chroma downsample part. | ||
76 | * @cdm Pointer to chroma down context | ||
77 | */ | ||
78 | int (*setup_cdwn)(struct dpu_hw_cdm *cdm, | ||
79 | struct dpu_hw_cdm_cfg *cfg); | ||
80 | |||
81 | /** | ||
82 | * Enable the CDM module | ||
83 | * @cdm Pointer to chroma down context | ||
84 | */ | ||
85 | int (*enable)(struct dpu_hw_cdm *cdm, | ||
86 | struct dpu_hw_cdm_cfg *cfg); | ||
87 | |||
88 | /** | ||
89 | * Disable the CDM module | ||
90 | * @cdm Pointer to chroma down context | ||
91 | */ | ||
92 | void (*disable)(struct dpu_hw_cdm *cdm); | ||
93 | }; | ||
94 | |||
95 | struct dpu_hw_cdm { | ||
96 | struct dpu_hw_blk base; | ||
97 | struct dpu_hw_blk_reg_map hw; | ||
98 | |||
99 | /* chroma down */ | ||
100 | const struct dpu_cdm_cfg *caps; | ||
101 | enum dpu_cdm idx; | ||
102 | |||
103 | /* mdp top hw driver */ | ||
104 | struct dpu_hw_mdp *hw_mdp; | ||
105 | |||
106 | /* ops */ | ||
107 | struct dpu_hw_cdm_ops ops; | ||
108 | }; | ||
109 | |||
110 | /** | ||
111 | * dpu_hw_cdm - convert base object dpu_hw_base to container | ||
112 | * @hw: Pointer to base hardware block | ||
113 | * return: Pointer to hardware block container | ||
114 | */ | ||
115 | static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw) | ||
116 | { | ||
117 | return container_of(hw, struct dpu_hw_cdm, base); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * dpu_hw_cdm_init - initializes the cdm hw driver object. | ||
122 | * should be called once per CDM before the block is accessed. | ||
123 | * @idx: cdm index for which driver object is required | ||
124 | * @addr: mapped register io address of MDP | ||
125 | * @m : pointer to mdss catalog data | ||
126 | * @hw_mdp: pointer to mdp top hw driver object | ||
127 | */ | ||
128 | struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx, | ||
129 | void __iomem *addr, | ||
130 | struct dpu_mdss_cfg *m, | ||
131 | struct dpu_hw_mdp *hw_mdp); | ||
132 | |||
133 | /** | ||
134 | * dpu_hw_cdm_destroy - destroys CDM driver context | ||
135 | * @cdm: pointer to CDM driver context | ||
136 | */ | ||
137 | void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm); | ||
138 | |||
139 | #endif /*_DPU_HW_CDM_H */ | ||
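to_dpu_hw_cdm() above is the standard container_of idiom: recover the enclosing object from a pointer to its embedded dpu_hw_blk. A tiny self-contained demonstration of the same idiom, using hypothetical stand-in structs rather than the driver types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct hw_blk { int id; };                        /* stands in for dpu_hw_blk */
struct hw_cdm { struct hw_blk base; int caps; };  /* stands in for dpu_hw_cdm */

static struct hw_cdm *to_hw_cdm(struct hw_blk *hw)
{
    return container_of(hw, struct hw_cdm, base);
}

int main(void)
{
    struct hw_cdm cdm = { .base = { .id = 7 }, .caps = 3 };
    struct hw_blk *base = &cdm.base;

    /* Recovers &cdm from the embedded base pointer. */
    printf("caps = %d\n", to_hw_cdm(base)->caps);
    return 0;
}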
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 06be7cf7ce50..eec1051f2afc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | |||
@@ -224,19 +224,6 @@ static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx, | |||
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |
226 | 226 | ||
227 | static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx, | ||
228 | u32 *flushbits, enum dpu_cdm cdm) | ||
229 | { | ||
230 | switch (cdm) { | ||
231 | case CDM_0: | ||
232 | *flushbits |= BIT(26); | ||
233 | break; | ||
234 | default: | ||
235 | return -EINVAL; | ||
236 | } | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us) | 227 | static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us) |
241 | { | 228 | { |
242 | struct dpu_hw_blk_reg_map *c = &ctx->hw; | 229 | struct dpu_hw_blk_reg_map *c = &ctx->hw; |
@@ -310,7 +297,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx, | |||
310 | u32 mixercfg = 0, mixercfg_ext = 0, mix, ext; | 297 | u32 mixercfg = 0, mixercfg_ext = 0, mix, ext; |
311 | u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0; | 298 | u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0; |
312 | int i, j; | 299 | int i, j; |
313 | u8 stages; | 300 | int stages; |
314 | int pipes_per_stage; | 301 | int pipes_per_stage; |
315 | 302 | ||
316 | stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm); | 303 | stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm); |
@@ -485,7 +472,6 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, | |||
485 | ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; | 472 | ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; |
486 | ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; | 473 | ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; |
487 | ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; | 474 | ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; |
488 | ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm; | ||
489 | }; | 475 | }; |
490 | 476 | ||
491 | static struct dpu_hw_blk_ops dpu_hw_ops = { | 477 | static struct dpu_hw_blk_ops dpu_hw_ops = { |
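One small but meaningful change in this file is `stages` switching from u8 to int: _mixer_stages() presumably can return a negative error code, and storing that in a u8 silently wraps it into a large positive count, so a later `< 0` check could never fire. A standalone illustration of the truncation pitfall, with a hypothetical stand-in for _mixer_stages():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for _mixer_stages(): returns a stage count on
 * success or a negative errno when the mixer configuration is invalid. */
static int mixer_stages(int valid)
{
    return valid ? 8 : -EINVAL;
}

int main(void)
{
    uint8_t as_u8 = (uint8_t)mixer_stages(0); /* -EINVAL wraps to 234 */
    int as_int = mixer_stages(0);

    printf("stored in u8:  %u (a \"< 0\" check never fires)\n", (unsigned)as_u8);
    printf("stored in int: %d (%s)\n", as_int,
           as_int < 0 ? "error detected" : "ok");
    return 0;
}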
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index c66a71f8b839..6f313faca43e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | |||
@@ -142,10 +142,6 @@ struct dpu_hw_ctl_ops { | |||
142 | u32 *flushbits, | 142 | u32 *flushbits, |
143 | enum dpu_intf blk); | 143 | enum dpu_intf blk); |
144 | 144 | ||
145 | int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx, | ||
146 | u32 *flushbits, | ||
147 | enum dpu_cdm blk); | ||
148 | |||
149 | /** | 145 | /** |
150 | * Set all blend stages to disabled | 146 | * Set all blend stages to disabled |
151 | * @ctx : ctl path ctx pointer | 147 | * @ctx : ctl path ctx pointer |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index d280df5613c9..9c6bba0ac7c3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | |||
@@ -65,9 +65,6 @@ | |||
65 | #define INTF_FRAME_COUNT 0x0AC | 65 | #define INTF_FRAME_COUNT 0x0AC |
66 | #define INTF_LINE_COUNT 0x0B0 | 66 | #define INTF_LINE_COUNT 0x0B0 |
67 | 67 | ||
68 | #define INTF_MISR_CTRL 0x180 | ||
69 | #define INTF_MISR_SIGNATURE 0x184 | ||
70 | |||
71 | static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, | 68 | static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, |
72 | struct dpu_mdss_cfg *m, | 69 | struct dpu_mdss_cfg *m, |
73 | void __iomem *addr, | 70 | void __iomem *addr, |
@@ -246,30 +243,6 @@ static void dpu_hw_intf_get_status( | |||
246 | } | 243 | } |
247 | } | 244 | } |
248 | 245 | ||
249 | static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, | ||
250 | bool enable, u32 frame_count) | ||
251 | { | ||
252 | struct dpu_hw_blk_reg_map *c = &intf->hw; | ||
253 | u32 config = 0; | ||
254 | |||
255 | DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR); | ||
256 | /* clear misr data */ | ||
257 | wmb(); | ||
258 | |||
259 | if (enable) | ||
260 | config = (frame_count & MISR_FRAME_COUNT_MASK) | | ||
261 | MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK; | ||
262 | |||
263 | DPU_REG_WRITE(c, INTF_MISR_CTRL, config); | ||
264 | } | ||
265 | |||
266 | static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf) | ||
267 | { | ||
268 | struct dpu_hw_blk_reg_map *c = &intf->hw; | ||
269 | |||
270 | return DPU_REG_READ(c, INTF_MISR_SIGNATURE); | ||
271 | } | ||
272 | |||
273 | static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf) | 246 | static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf) |
274 | { | 247 | { |
275 | struct dpu_hw_blk_reg_map *c; | 248 | struct dpu_hw_blk_reg_map *c; |
@@ -289,8 +262,6 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops, | |||
289 | ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch; | 262 | ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch; |
290 | ops->get_status = dpu_hw_intf_get_status; | 263 | ops->get_status = dpu_hw_intf_get_status; |
291 | ops->enable_timing = dpu_hw_intf_enable_timing_engine; | 264 | ops->enable_timing = dpu_hw_intf_enable_timing_engine; |
292 | ops->setup_misr = dpu_hw_intf_setup_misr; | ||
293 | ops->collect_misr = dpu_hw_intf_collect_misr; | ||
294 | ops->get_line_count = dpu_hw_intf_get_line_count; | 265 | ops->get_line_count = dpu_hw_intf_get_line_count; |
295 | } | 266 | } |
296 | 267 | ||
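The MISR (signature capture) hooks removed here, and the matching layer-mixer ones further down, all assembled the same control word from the MISR_* macros that this series also drops from dpu_hw_util.h. A standalone model of that control-word assembly (plain C; the register write is replaced by a pointer store, and the driver additionally issued a write barrier between the two writes):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Values match the MISR_* macros removed from dpu_hw_util.h. */
#define MISR_FRAME_COUNT_MASK        0xFF
#define MISR_CTRL_ENABLE             BIT(8)
#define MISR_CTRL_STATUS_CLEAR       BIT(10)
#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)

/* Model of the removed dpu_hw_intf_setup_misr()/dpu_hw_lm_setup_misr():
 * the first write clears any captured signature, the second arms (or
 * leaves disabled) capture for the requested number of frames. */
static void setup_misr(uint32_t *ctrl_reg, int enable, uint32_t frame_count)
{
    *ctrl_reg = MISR_CTRL_STATUS_CLEAR;   /* clear previous MISR data */

    *ctrl_reg = enable ?
        (frame_count & MISR_FRAME_COUNT_MASK) |
        MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK : 0;
}

int main(void)
{
    uint32_t ctrl = 0;

    setup_misr(&ctrl, 1, 1);  /* capture a signature every frame */
    printf("MISR_CTRL = 0x%08x\n", (unsigned)ctrl);
    return 0;
}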
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index a79d735da68d..3b77df460dea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | |||
@@ -59,8 +59,6 @@ struct intf_status { | |||
59 | * @ setup_prog_fetch : enables/disables the programmable fetch logic | 59 | * @ setup_prog_fetch : enables/disables the programmable fetch logic |
60 | * @ enable_timing: enable/disable timing engine | 60 | * @ enable_timing: enable/disable timing engine |
61 | * @ get_status: returns if timing engine is enabled or not | 61 | * @ get_status: returns if timing engine is enabled or not |
62 | * @ setup_misr: enables/disables MISR in HW register | ||
63 | * @ collect_misr: reads and stores MISR data from HW register | ||
64 | * @ get_line_count: reads current vertical line counter | 62 | * @ get_line_count: reads current vertical line counter |
65 | */ | 63 | */ |
66 | struct dpu_hw_intf_ops { | 64 | struct dpu_hw_intf_ops { |
@@ -77,11 +75,6 @@ struct dpu_hw_intf_ops { | |||
77 | void (*get_status)(struct dpu_hw_intf *intf, | 75 | void (*get_status)(struct dpu_hw_intf *intf, |
78 | struct intf_status *status); | 76 | struct intf_status *status); |
79 | 77 | ||
80 | void (*setup_misr)(struct dpu_hw_intf *intf, | ||
81 | bool enable, u32 frame_count); | ||
82 | |||
83 | u32 (*collect_misr)(struct dpu_hw_intf *intf); | ||
84 | |||
85 | u32 (*get_line_count)(struct dpu_hw_intf *intf); | 78 | u32 (*get_line_count)(struct dpu_hw_intf *intf); |
86 | }; | 79 | }; |
87 | 80 | ||
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 4ab72b0f07a5..acb8dc8acaa5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | |||
@@ -34,9 +34,6 @@ | |||
34 | #define LM_BLEND0_FG_ALPHA 0x04 | 34 | #define LM_BLEND0_FG_ALPHA 0x04 |
35 | #define LM_BLEND0_BG_ALPHA 0x08 | 35 | #define LM_BLEND0_BG_ALPHA 0x08 |
36 | 36 | ||
37 | #define LM_MISR_CTRL 0x310 | ||
38 | #define LM_MISR_SIGNATURE 0x314 | ||
39 | |||
40 | static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, | 37 | static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, |
41 | struct dpu_mdss_cfg *m, | 38 | struct dpu_mdss_cfg *m, |
42 | void __iomem *addr, | 39 | void __iomem *addr, |
@@ -171,30 +168,6 @@ static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer, | |||
171 | { | 168 | { |
172 | } | 169 | } |
173 | 170 | ||
174 | static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, | ||
175 | bool enable, u32 frame_count) | ||
176 | { | ||
177 | struct dpu_hw_blk_reg_map *c = &ctx->hw; | ||
178 | u32 config = 0; | ||
179 | |||
180 | DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR); | ||
181 | /* clear misr data */ | ||
182 | wmb(); | ||
183 | |||
184 | if (enable) | ||
185 | config = (frame_count & MISR_FRAME_COUNT_MASK) | | ||
186 | MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK; | ||
187 | |||
188 | DPU_REG_WRITE(c, LM_MISR_CTRL, config); | ||
189 | } | ||
190 | |||
191 | static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx) | ||
192 | { | ||
193 | struct dpu_hw_blk_reg_map *c = &ctx->hw; | ||
194 | |||
195 | return DPU_REG_READ(c, LM_MISR_SIGNATURE); | ||
196 | } | ||
197 | |||
198 | static void _setup_mixer_ops(struct dpu_mdss_cfg *m, | 171 | static void _setup_mixer_ops(struct dpu_mdss_cfg *m, |
199 | struct dpu_hw_lm_ops *ops, | 172 | struct dpu_hw_lm_ops *ops, |
200 | unsigned long features) | 173 | unsigned long features) |
@@ -207,8 +180,6 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m, | |||
207 | ops->setup_alpha_out = dpu_hw_lm_setup_color3; | 180 | ops->setup_alpha_out = dpu_hw_lm_setup_color3; |
208 | ops->setup_border_color = dpu_hw_lm_setup_border_color; | 181 | ops->setup_border_color = dpu_hw_lm_setup_border_color; |
209 | ops->setup_gc = dpu_hw_lm_gc; | 182 | ops->setup_gc = dpu_hw_lm_gc; |
210 | ops->setup_misr = dpu_hw_lm_setup_misr; | ||
211 | ops->collect_misr = dpu_hw_lm_collect_misr; | ||
212 | }; | 183 | }; |
213 | 184 | ||
214 | static struct dpu_hw_blk_ops dpu_hw_ops = { | 185 | static struct dpu_hw_blk_ops dpu_hw_ops = { |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index e29e5dab31bf..5b036aca8340 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | |||
@@ -66,13 +66,6 @@ struct dpu_hw_lm_ops { | |||
66 | */ | 66 | */ |
67 | void (*setup_gc)(struct dpu_hw_mixer *mixer, | 67 | void (*setup_gc)(struct dpu_hw_mixer *mixer, |
68 | void *cfg); | 68 | void *cfg); |
69 | |||
70 | /* setup_misr: enables/disables MISR in HW register */ | ||
71 | void (*setup_misr)(struct dpu_hw_mixer *ctx, | ||
72 | bool enable, u32 frame_count); | ||
73 | |||
74 | /* collect_misr: reads and stores MISR data from HW register */ | ||
75 | u32 (*collect_misr)(struct dpu_hw_mixer *ctx); | ||
76 | }; | 69 | }; |
77 | 70 | ||
78 | struct dpu_hw_mixer { | 71 | struct dpu_hw_mixer { |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 35e6bf930924..68c54d2c9677 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | |||
@@ -100,7 +100,6 @@ enum dpu_hw_blk_type { | |||
100 | DPU_HW_BLK_SSPP, | 100 | DPU_HW_BLK_SSPP, |
101 | DPU_HW_BLK_LM, | 101 | DPU_HW_BLK_LM, |
102 | DPU_HW_BLK_CTL, | 102 | DPU_HW_BLK_CTL, |
103 | DPU_HW_BLK_CDM, | ||
104 | DPU_HW_BLK_PINGPONG, | 103 | DPU_HW_BLK_PINGPONG, |
105 | DPU_HW_BLK_INTF, | 104 | DPU_HW_BLK_INTF, |
106 | DPU_HW_BLK_WB, | 105 | DPU_HW_BLK_WB, |
@@ -173,13 +172,6 @@ enum dpu_dspp { | |||
173 | DSPP_MAX | 172 | DSPP_MAX |
174 | }; | 173 | }; |
175 | 174 | ||
176 | enum dpu_ds { | ||
177 | DS_TOP, | ||
178 | DS_0, | ||
179 | DS_1, | ||
180 | DS_MAX | ||
181 | }; | ||
182 | |||
183 | enum dpu_ctl { | 175 | enum dpu_ctl { |
184 | CTL_0 = 1, | 176 | CTL_0 = 1, |
185 | CTL_1, | 177 | CTL_1, |
@@ -189,12 +181,6 @@ enum dpu_ctl { | |||
189 | CTL_MAX | 181 | CTL_MAX |
190 | }; | 182 | }; |
191 | 183 | ||
192 | enum dpu_cdm { | ||
193 | CDM_0 = 1, | ||
194 | CDM_1, | ||
195 | CDM_MAX | ||
196 | }; | ||
197 | |||
198 | enum dpu_pingpong { | 184 | enum dpu_pingpong { |
199 | PINGPONG_0 = 1, | 185 | PINGPONG_0 = 1, |
200 | PINGPONG_1, | 186 | PINGPONG_1, |
@@ -246,12 +232,6 @@ enum dpu_wb { | |||
246 | WB_MAX | 232 | WB_MAX |
247 | }; | 233 | }; |
248 | 234 | ||
249 | enum dpu_ad { | ||
250 | AD_0 = 0x1, | ||
251 | AD_1, | ||
252 | AD_MAX | ||
253 | }; | ||
254 | |||
255 | enum dpu_cwb { | 235 | enum dpu_cwb { |
256 | CWB_0 = 0x1, | 236 | CWB_0 = 0x1, |
257 | CWB_1, | 237 | CWB_1, |
@@ -451,15 +431,14 @@ struct dpu_mdss_color { | |||
451 | * Define bit masks for h/w logging. | 431 | * Define bit masks for h/w logging. |
452 | */ | 432 | */ |
453 | #define DPU_DBG_MASK_NONE (1 << 0) | 433 | #define DPU_DBG_MASK_NONE (1 << 0) |
454 | #define DPU_DBG_MASK_CDM (1 << 1) | 434 | #define DPU_DBG_MASK_INTF (1 << 1) |
455 | #define DPU_DBG_MASK_INTF (1 << 2) | 435 | #define DPU_DBG_MASK_LM (1 << 2) |
456 | #define DPU_DBG_MASK_LM (1 << 3) | 436 | #define DPU_DBG_MASK_CTL (1 << 3) |
457 | #define DPU_DBG_MASK_CTL (1 << 4) | 437 | #define DPU_DBG_MASK_PINGPONG (1 << 4) |
458 | #define DPU_DBG_MASK_PINGPONG (1 << 5) | 438 | #define DPU_DBG_MASK_SSPP (1 << 5) |
459 | #define DPU_DBG_MASK_SSPP (1 << 6) | 439 | #define DPU_DBG_MASK_WB (1 << 6) |
460 | #define DPU_DBG_MASK_WB (1 << 7) | 440 | #define DPU_DBG_MASK_TOP (1 << 7) |
461 | #define DPU_DBG_MASK_TOP (1 << 8) | 441 | #define DPU_DBG_MASK_VBIF (1 << 8) |
462 | #define DPU_DBG_MASK_VBIF (1 << 9) | 442 | #define DPU_DBG_MASK_ROT (1 << 9) |
463 | #define DPU_DBG_MASK_ROT (1 << 10) | ||
464 | 443 | ||
465 | #endif /* _DPU_HW_MDSS_H */ | 444 | #endif /* _DPU_HW_MDSS_H */ |
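With the CDM bit gone, the remaining DPU_DBG_MASK_* values shift down by one. These masks only gate per-block register logging against dpu_hw_util_log_mask, so renumbering is harmless as long as nothing persists the old values. A minimal sketch of that gating pattern, simplified from the reg-map/log-mask arrangement visible elsewhere in this diff:

#include <stdint.h>
#include <stdio.h>

/* Post-patch bit assignments from dpu_hw_mdss.h (subset). */
#define DPU_DBG_MASK_NONE  (1 << 0)
#define DPU_DBG_MASK_INTF  (1 << 1)
#define DPU_DBG_MASK_LM    (1 << 2)
#define DPU_DBG_MASK_CTL   (1 << 3)

/* Stand-in for dpu_hw_util_log_mask; each block's reg map carries one of
 * the masks above (see the *_offset() helpers in this diff), and register
 * writes are logged only when the matching bit is set here. */
static uint32_t log_mask = DPU_DBG_MASK_LM | DPU_DBG_MASK_CTL;

static void reg_write(uint32_t blk_mask, uint32_t off, uint32_t val)
{
    if (log_mask & blk_mask)
        printf("[dpu] 0x%03x <- 0x%08x\n", (unsigned)off, (unsigned)val);
    /* ...the actual MMIO write would happen here... */
}

int main(void)
{
    reg_write(DPU_DBG_MASK_LM, 0x310, 0x1);    /* logged */
    reg_write(DPU_DBG_MASK_INTF, 0x180, 0x0);  /* silent */
    return 0;
}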
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index db2798e862fc..b8781256e21b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | |||
@@ -98,23 +98,6 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp, | |||
98 | DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1); | 98 | DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1); |
99 | } | 99 | } |
100 | 100 | ||
101 | static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp, | ||
102 | struct cdm_output_cfg *cfg) | ||
103 | { | ||
104 | struct dpu_hw_blk_reg_map *c; | ||
105 | u32 out_ctl = 0; | ||
106 | |||
107 | if (!mdp || !cfg) | ||
108 | return; | ||
109 | |||
110 | c = &mdp->hw; | ||
111 | |||
112 | if (cfg->intf_en) | ||
113 | out_ctl |= BIT(19); | ||
114 | |||
115 | DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl); | ||
116 | } | ||
117 | |||
118 | static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp, | 101 | static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp, |
119 | enum dpu_clk_ctrl_type clk_ctrl, bool enable) | 102 | enum dpu_clk_ctrl_type clk_ctrl, bool enable) |
120 | { | 103 | { |
@@ -307,7 +290,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, | |||
307 | unsigned long cap) | 290 | unsigned long cap) |
308 | { | 291 | { |
309 | ops->setup_split_pipe = dpu_hw_setup_split_pipe; | 292 | ops->setup_split_pipe = dpu_hw_setup_split_pipe; |
310 | ops->setup_cdm_output = dpu_hw_setup_cdm_output; | ||
311 | ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl; | 293 | ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl; |
312 | ops->get_danger_status = dpu_hw_get_danger_status; | 294 | ops->get_danger_status = dpu_hw_get_danger_status; |
313 | ops->setup_vsync_source = dpu_hw_setup_vsync_source; | 295 | ops->setup_vsync_source = dpu_hw_setup_vsync_source; |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h index 899925aaa6d7..192e338f20bb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | |||
@@ -52,14 +52,6 @@ struct split_pipe_cfg { | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | /** | 54 | /** |
55 | * struct cdm_output_cfg: output configuration for cdm | ||
56 | * @intf_en : enable/disable interface output | ||
57 | */ | ||
58 | struct cdm_output_cfg { | ||
59 | bool intf_en; | ||
60 | }; | ||
61 | |||
62 | /** | ||
63 | * struct dpu_danger_safe_status: danger and safe status signals | 55 | * struct dpu_danger_safe_status: danger and safe status signals |
64 | * @mdp: top level status | 56 | * @mdp: top level status |
65 | * @sspp: source pipe status | 57 | * @sspp: source pipe status |
@@ -89,7 +81,6 @@ struct dpu_vsync_source_cfg { | |||
89 | * Assumption is these functions will be called after clocks are enabled. | 81 | * Assumption is these functions will be called after clocks are enabled. |
90 | * @setup_split_pipe : Programs the pipe control registers | 82 | * @setup_split_pipe : Programs the pipe control registers |
91 | * @setup_pp_split : Programs the pp split control registers | 83 | * @setup_pp_split : Programs the pp split control registers |
92 | * @setup_cdm_output : programs cdm control | ||
93 | * @setup_traffic_shaper : programs traffic shaper control | 84 | * @setup_traffic_shaper : programs traffic shaper control |
94 | */ | 85 | */ |
95 | struct dpu_hw_mdp_ops { | 86 | struct dpu_hw_mdp_ops { |
@@ -102,14 +93,6 @@ struct dpu_hw_mdp_ops { | |||
102 | struct split_pipe_cfg *p); | 93 | struct split_pipe_cfg *p); |
103 | 94 | ||
104 | /** | 95 | /** |
105 | * setup_cdm_output() : Setup selection control of the cdm data path | ||
106 | * @mdp : mdp top context driver | ||
107 | * @cfg : cdm output configuration | ||
108 | */ | ||
109 | void (*setup_cdm_output)(struct dpu_hw_mdp *mdp, | ||
110 | struct cdm_output_cfg *cfg); | ||
111 | |||
112 | /** | ||
113 | * setup_traffic_shaper() : Setup traffic shaper control | 96 | * setup_traffic_shaper() : Setup traffic shaper control |
114 | * @mdp : mdp top context driver | 97 | * @mdp : mdp top context driver |
115 | * @cfg : traffic shaper configuration | 98 | * @cfg : traffic shaper configuration |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index 4cabae480a7b..cb5c0170374b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | |||
@@ -50,9 +50,6 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE; | |||
50 | #define QSEED3_CLK_CTRL0 0x54 | 50 | #define QSEED3_CLK_CTRL0 0x54 |
51 | #define QSEED3_CLK_CTRL1 0x58 | 51 | #define QSEED3_CLK_CTRL1 0x58 |
52 | #define QSEED3_CLK_STATUS 0x5C | 52 | #define QSEED3_CLK_STATUS 0x5C |
53 | #define QSEED3_MISR_CTRL 0x70 | ||
54 | #define QSEED3_MISR_SIGNATURE_0 0x74 | ||
55 | #define QSEED3_MISR_SIGNATURE_1 0x78 | ||
56 | #define QSEED3_PHASE_INIT_Y_H 0x90 | 53 | #define QSEED3_PHASE_INIT_Y_H 0x90 |
57 | #define QSEED3_PHASE_INIT_Y_V 0x94 | 54 | #define QSEED3_PHASE_INIT_Y_V 0x94 |
58 | #define QSEED3_PHASE_INIT_UV_H 0x98 | 55 | #define QSEED3_PHASE_INIT_UV_H 0x98 |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 1240f505ca53..321fc64ddd0e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | |||
@@ -148,16 +148,6 @@ struct dpu_hw_scaler3_cfg { | |||
148 | struct dpu_hw_scaler3_de_cfg de; | 148 | struct dpu_hw_scaler3_de_cfg de; |
149 | }; | 149 | }; |
150 | 150 | ||
151 | struct dpu_hw_scaler3_lut_cfg { | ||
152 | bool is_configured; | ||
153 | u32 *dir_lut; | ||
154 | size_t dir_len; | ||
155 | u32 *cir_lut; | ||
156 | size_t cir_len; | ||
157 | u32 *sep_lut; | ||
158 | size_t sep_len; | ||
159 | }; | ||
160 | |||
161 | /** | 151 | /** |
162 | * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure | 152 | * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure |
163 | * @num_ext_pxls_lr: Number of total horizontal pixels | 153 | * @num_ext_pxls_lr: Number of total horizontal pixels |
@@ -325,12 +315,6 @@ int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off); | |||
325 | #define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off) | 315 | #define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off) |
326 | #define DPU_REG_READ(c, off) dpu_reg_read(c, off) | 316 | #define DPU_REG_READ(c, off) dpu_reg_read(c, off) |
327 | 317 | ||
328 | #define MISR_FRAME_COUNT_MASK 0xFF | ||
329 | #define MISR_CTRL_ENABLE BIT(8) | ||
330 | #define MISR_CTRL_STATUS BIT(9) | ||
331 | #define MISR_CTRL_STATUS_CLEAR BIT(10) | ||
332 | #define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31) | ||
333 | |||
334 | void *dpu_hw_util_get_dir(void); | 318 | void *dpu_hw_util_get_dir(void); |
335 | 319 | ||
336 | void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, | 320 | void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 7dd6bd2d6d37..0a683e65a9f3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | |||
@@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, | |||
450 | int i, rc; | 450 | int i, rc; |
451 | 451 | ||
452 | /*TODO: Support two independent DSI connectors */ | 452 | /*TODO: Support two independent DSI connectors */ |
453 | encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI); | 453 | encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); |
454 | if (IS_ERR_OR_NULL(encoder)) { | 454 | if (IS_ERR_OR_NULL(encoder)) { |
455 | DPU_ERROR("encoder init failed for dsi display\n"); | 455 | DPU_ERROR("encoder init failed for dsi display\n"); |
456 | return; | 456 | return; |
@@ -531,12 +531,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) | |||
531 | { | 531 | { |
532 | struct drm_device *dev; | 532 | struct drm_device *dev; |
533 | struct drm_plane *primary_planes[MAX_PLANES], *plane; | 533 | struct drm_plane *primary_planes[MAX_PLANES], *plane; |
534 | struct drm_plane *cursor_planes[MAX_PLANES] = { NULL }; | ||
534 | struct drm_crtc *crtc; | 535 | struct drm_crtc *crtc; |
535 | 536 | ||
536 | struct msm_drm_private *priv; | 537 | struct msm_drm_private *priv; |
537 | struct dpu_mdss_cfg *catalog; | 538 | struct dpu_mdss_cfg *catalog; |
538 | 539 | ||
539 | int primary_planes_idx = 0, i, ret; | 540 | int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; |
540 | int max_crtc_count; | 541 | int max_crtc_count; |
541 | 542 | ||
542 | if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { | 543 | if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) { |
@@ -556,16 +557,24 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) | |||
556 | 557 | ||
557 | max_crtc_count = min(catalog->mixer_count, priv->num_encoders); | 558 | max_crtc_count = min(catalog->mixer_count, priv->num_encoders); |
558 | 559 | ||
559 | /* Create the planes */ | 560 | /* Create the planes, keeping track of one primary/cursor per crtc */ |
560 | for (i = 0; i < catalog->sspp_count; i++) { | 561 | for (i = 0; i < catalog->sspp_count; i++) { |
561 | bool primary = true; | 562 | enum drm_plane_type type; |
562 | 563 | ||
563 | if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR) | 564 | if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)) |
564 | || primary_planes_idx >= max_crtc_count) | 565 | && cursor_planes_idx < max_crtc_count) |
565 | primary = false; | 566 | type = DRM_PLANE_TYPE_CURSOR; |
566 | 567 | else if (primary_planes_idx < max_crtc_count) | |
567 | plane = dpu_plane_init(dev, catalog->sspp[i].id, primary, | 568 | type = DRM_PLANE_TYPE_PRIMARY; |
568 | (1UL << max_crtc_count) - 1, 0); | 569 | else |
570 | type = DRM_PLANE_TYPE_OVERLAY; | ||
571 | |||
572 | DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n", | ||
573 | type, catalog->sspp[i].features, | ||
574 | catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)); | ||
575 | |||
576 | plane = dpu_plane_init(dev, catalog->sspp[i].id, type, | ||
577 | (1UL << max_crtc_count) - 1, 0); | ||
569 | if (IS_ERR(plane)) { | 578 | if (IS_ERR(plane)) { |
570 | DPU_ERROR("dpu_plane_init failed\n"); | 579 | DPU_ERROR("dpu_plane_init failed\n"); |
571 | ret = PTR_ERR(plane); | 580 | ret = PTR_ERR(plane); |
@@ -573,7 +582,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) | |||
573 | } | 582 | } |
574 | priv->planes[priv->num_planes++] = plane; | 583 | priv->planes[priv->num_planes++] = plane; |
575 | 584 | ||
576 | if (primary) | 585 | if (type == DRM_PLANE_TYPE_CURSOR) |
586 | cursor_planes[cursor_planes_idx++] = plane; | ||
587 | else if (type == DRM_PLANE_TYPE_PRIMARY) | ||
577 | primary_planes[primary_planes_idx++] = plane; | 588 | primary_planes[primary_planes_idx++] = plane; |
578 | } | 589 | } |
579 | 590 | ||
@@ -581,7 +592,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) | |||
581 | 592 | ||
582 | /* Create one CRTC per encoder */ | 593 | /* Create one CRTC per encoder */ |
583 | for (i = 0; i < max_crtc_count; i++) { | 594 | for (i = 0; i < max_crtc_count; i++) { |
584 | crtc = dpu_crtc_init(dev, primary_planes[i]); | 595 | crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]); |
585 | if (IS_ERR(crtc)) { | 596 | if (IS_ERR(crtc)) { |
586 | ret = PTR_ERR(crtc); | 597 | ret = PTR_ERR(crtc); |
587 | goto fail; | 598 | goto fail; |
@@ -956,8 +967,7 @@ static void dpu_kms_handle_power_event(u32 event_type, void *usr) | |||
956 | if (!dpu_kms) | 967 | if (!dpu_kms) |
957 | return; | 968 | return; |
958 | 969 | ||
959 | if (event_type == DPU_POWER_EVENT_POST_ENABLE) | 970 | dpu_vbif_init_memtypes(dpu_kms); |
960 | dpu_vbif_init_memtypes(dpu_kms); | ||
961 | } | 971 | } |
962 | 972 | ||
963 | static int dpu_kms_hw_init(struct msm_kms *kms) | 973 | static int dpu_kms_hw_init(struct msm_kms *kms) |
@@ -1144,10 +1154,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) | |||
1144 | /* | 1154 | /* |
1145 | * Handle (re)initializations during power enable | 1155 | * Handle (re)initializations during power enable |
1146 | */ | 1156 | */ |
1147 | dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms); | 1157 | dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms); |
1148 | dpu_kms->power_event = dpu_power_handle_register_event( | 1158 | dpu_kms->power_event = dpu_power_handle_register_event( |
1149 | &dpu_kms->phandle, | 1159 | &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE, |
1150 | DPU_POWER_EVENT_POST_ENABLE, | ||
1151 | dpu_kms_handle_power_event, dpu_kms, "kms"); | 1160 | dpu_kms_handle_power_event, dpu_kms, "kms"); |
1152 | 1161 | ||
1153 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | 1162 | pm_runtime_put_sync(&dpu_kms->pdev->dev); |
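The plane-creation loop above replaces the old primary/non-primary boolean with a three-way classification: cursor-capable SSPPs fill the per-CRTC cursor slots first, the next pipes fill the primary slots, and whatever is left becomes an overlay. The sketch below restates that selection order with hypothetical inputs; the DPU_SSPP_CURSOR bit position is a placeholder, not the real catalog enum value:

#include <stdio.h>

#define BIT(n) (1ul << (n))
#define DPU_SSPP_CURSOR 1   /* placeholder bit index for the cursor feature */

enum plane_type { OVERLAY, PRIMARY, CURSOR };

/* Same selection order as _dpu_kms_drm_obj_init(): cursor-capable pipes
 * take the per-CRTC cursor slots, remaining pipes take the primary slots,
 * anything beyond max_crtc_count becomes an overlay plane. */
static enum plane_type classify(unsigned long features, int max_crtc_count,
                                int *cursor_idx, int *primary_idx)
{
    if ((features & BIT(DPU_SSPP_CURSOR)) && *cursor_idx < max_crtc_count) {
        (*cursor_idx)++;
        return CURSOR;
    }
    if (*primary_idx < max_crtc_count) {
        (*primary_idx)++;
        return PRIMARY;
    }
    return OVERLAY;
}

int main(void)
{
    /* Hypothetical 4-pipe catalog where only pipe 2 is cursor-capable. */
    unsigned long features[4] = { 0, 0, BIT(DPU_SSPP_CURSOR), 0 };
    static const char *names[] = { "overlay", "primary", "cursor" };
    int cursors = 0, primaries = 0;

    for (int i = 0; i < 4; i++)
        printf("sspp%d -> %s\n", i,
               names[classify(features[i], 2, &cursors, &primaries)]);
    return 0;
}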
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 9e533b86682c..2235ef8129f4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | |||
@@ -158,6 +158,8 @@ static void dpu_mdss_destroy(struct drm_device *dev) | |||
158 | 158 | ||
159 | _dpu_mdss_irq_domain_fini(dpu_mdss); | 159 | _dpu_mdss_irq_domain_fini(dpu_mdss); |
160 | 160 | ||
161 | free_irq(platform_get_irq(pdev, 0), dpu_mdss); | ||
162 | |||
161 | msm_dss_put_clk(mp->clk_config, mp->num_clk); | 163 | msm_dss_put_clk(mp->clk_config, mp->num_clk); |
162 | devm_kfree(&pdev->dev, mp->clk_config); | 164 | devm_kfree(&pdev->dev, mp->clk_config); |
163 | 165 | ||
@@ -215,7 +217,7 @@ int dpu_mdss_init(struct drm_device *dev) | |||
215 | if (ret) | 217 | if (ret) |
216 | goto irq_domain_error; | 218 | goto irq_domain_error; |
217 | 219 | ||
218 | ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), | 220 | ret = request_irq(platform_get_irq(pdev, 0), |
219 | dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss); | 221 | dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss); |
220 | if (ret) { | 222 | if (ret) { |
221 | DPU_ERROR("failed to init irq: %d\n", ret); | 223 | DPU_ERROR("failed to init irq: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 015341e2dd4c..f549daf30fe6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | |||
@@ -125,26 +125,11 @@ struct dpu_plane { | |||
125 | 125 | ||
126 | static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) | 126 | static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) |
127 | { | 127 | { |
128 | struct msm_drm_private *priv; | 128 | struct msm_drm_private *priv = plane->dev->dev_private; |
129 | 129 | ||
130 | if (!plane || !plane->dev) | ||
131 | return NULL; | ||
132 | priv = plane->dev->dev_private; | ||
133 | if (!priv) | ||
134 | return NULL; | ||
135 | return to_dpu_kms(priv->kms); | 130 | return to_dpu_kms(priv->kms); |
136 | } | 131 | } |
137 | 132 | ||
138 | static bool dpu_plane_enabled(struct drm_plane_state *state) | ||
139 | { | ||
140 | return state && state->fb && state->crtc; | ||
141 | } | ||
142 | |||
143 | static bool dpu_plane_sspp_enabled(struct drm_plane_state *state) | ||
144 | { | ||
145 | return state && state->crtc; | ||
146 | } | ||
147 | |||
148 | /** | 133 | /** |
149 | * _dpu_plane_calc_fill_level - calculate fill level of the given source format | 134 | * _dpu_plane_calc_fill_level - calculate fill level of the given source format |
150 | * @plane: Pointer to drm plane | 135 | * @plane: Pointer to drm plane |
@@ -160,7 +145,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane, | |||
160 | u32 fixed_buff_size; | 145 | u32 fixed_buff_size; |
161 | u32 total_fl; | 146 | u32 total_fl; |
162 | 147 | ||
163 | if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) { | 148 | if (!fmt || !plane->state || !src_width || !fmt->bpp) { |
164 | DPU_ERROR("invalid arguments\n"); | 149 | DPU_ERROR("invalid arguments\n"); |
165 | return 0; | 150 | return 0; |
166 | } | 151 | } |
@@ -170,7 +155,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane, | |||
170 | fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size; | 155 | fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size; |
171 | 156 | ||
172 | list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) { | 157 | list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) { |
173 | if (!dpu_plane_enabled(tmp->base.state)) | 158 | if (!tmp->base.state->visible) |
174 | continue; | 159 | continue; |
175 | DPU_DEBUG("plane%d/%d src_width:%d/%d\n", | 160 | DPU_DEBUG("plane%d/%d src_width:%d/%d\n", |
176 | pdpu->base.base.id, tmp->base.base.id, | 161 | pdpu->base.base.id, tmp->base.base.id, |
@@ -241,26 +226,11 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, | |||
241 | static void _dpu_plane_set_qos_lut(struct drm_plane *plane, | 226 | static void _dpu_plane_set_qos_lut(struct drm_plane *plane, |
242 | struct drm_framebuffer *fb) | 227 | struct drm_framebuffer *fb) |
243 | { | 228 | { |
244 | struct dpu_plane *pdpu; | 229 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
245 | const struct dpu_format *fmt = NULL; | 230 | const struct dpu_format *fmt = NULL; |
246 | u64 qos_lut; | 231 | u64 qos_lut; |
247 | u32 total_fl = 0, lut_usage; | 232 | u32 total_fl = 0, lut_usage; |
248 | 233 | ||
249 | if (!plane || !fb) { | ||
250 | DPU_ERROR("invalid arguments plane %d fb %d\n", | ||
251 | plane != 0, fb != 0); | ||
252 | return; | ||
253 | } | ||
254 | |||
255 | pdpu = to_dpu_plane(plane); | ||
256 | |||
257 | if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) { | ||
258 | DPU_ERROR("invalid arguments\n"); | ||
259 | return; | ||
260 | } else if (!pdpu->pipe_hw->ops.setup_creq_lut) { | ||
261 | return; | ||
262 | } | ||
263 | |||
264 | if (!pdpu->is_rt_pipe) { | 234 | if (!pdpu->is_rt_pipe) { |
265 | lut_usage = DPU_QOS_LUT_USAGE_NRT; | 235 | lut_usage = DPU_QOS_LUT_USAGE_NRT; |
266 | } else { | 236 | } else { |
@@ -302,24 +272,10 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane, | |||
302 | static void _dpu_plane_set_danger_lut(struct drm_plane *plane, | 272 | static void _dpu_plane_set_danger_lut(struct drm_plane *plane, |
303 | struct drm_framebuffer *fb) | 273 | struct drm_framebuffer *fb) |
304 | { | 274 | { |
305 | struct dpu_plane *pdpu; | 275 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
306 | const struct dpu_format *fmt = NULL; | 276 | const struct dpu_format *fmt = NULL; |
307 | u32 danger_lut, safe_lut; | 277 | u32 danger_lut, safe_lut; |
308 | 278 | ||
309 | if (!plane || !fb) { | ||
310 | DPU_ERROR("invalid arguments\n"); | ||
311 | return; | ||
312 | } | ||
313 | |||
314 | pdpu = to_dpu_plane(plane); | ||
315 | |||
316 | if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) { | ||
317 | DPU_ERROR("invalid arguments\n"); | ||
318 | return; | ||
319 | } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) { | ||
320 | return; | ||
321 | } | ||
322 | |||
323 | if (!pdpu->is_rt_pipe) { | 279 | if (!pdpu->is_rt_pipe) { |
324 | danger_lut = pdpu->catalog->perf.danger_lut_tbl | 280 | danger_lut = pdpu->catalog->perf.danger_lut_tbl |
325 | [DPU_QOS_LUT_USAGE_NRT]; | 281 | [DPU_QOS_LUT_USAGE_NRT]; |
@@ -373,21 +329,7 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane, | |||
373 | static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, | 329 | static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, |
374 | bool enable, u32 flags) | 330 | bool enable, u32 flags) |
375 | { | 331 | { |
376 | struct dpu_plane *pdpu; | 332 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
377 | |||
378 | if (!plane) { | ||
379 | DPU_ERROR("invalid arguments\n"); | ||
380 | return; | ||
381 | } | ||
382 | |||
383 | pdpu = to_dpu_plane(plane); | ||
384 | |||
385 | if (!pdpu->pipe_hw || !pdpu->pipe_sblk) { | ||
386 | DPU_ERROR("invalid arguments\n"); | ||
387 | return; | ||
388 | } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) { | ||
389 | return; | ||
390 | } | ||
391 | 333 | ||
392 | if (flags & DPU_PLANE_QOS_VBLANK_CTRL) { | 334 | if (flags & DPU_PLANE_QOS_VBLANK_CTRL) { |
393 | pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank; | 335 | pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank; |
@@ -423,35 +365,17 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, | |||
423 | &pdpu->pipe_qos_cfg); | 365 | &pdpu->pipe_qos_cfg); |
424 | } | 366 | } |
425 | 367 | ||
426 | int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) | 368 | static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) |
427 | { | 369 | { |
428 | struct dpu_plane *pdpu; | 370 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
429 | struct msm_drm_private *priv; | 371 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); |
430 | struct dpu_kms *dpu_kms; | ||
431 | |||
432 | if (!plane || !plane->dev) { | ||
433 | DPU_ERROR("invalid arguments\n"); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | |||
437 | priv = plane->dev->dev_private; | ||
438 | if (!priv || !priv->kms) { | ||
439 | DPU_ERROR("invalid KMS reference\n"); | ||
440 | return -EINVAL; | ||
441 | } | ||
442 | |||
443 | dpu_kms = to_dpu_kms(priv->kms); | ||
444 | pdpu = to_dpu_plane(plane); | ||
445 | 372 | ||
446 | if (!pdpu->is_rt_pipe) | 373 | if (!pdpu->is_rt_pipe) |
447 | goto end; | 374 | return; |
448 | 375 | ||
449 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | 376 | pm_runtime_get_sync(&dpu_kms->pdev->dev); |
450 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); | 377 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); |
451 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | 378 | pm_runtime_put_sync(&dpu_kms->pdev->dev); |
452 | |||
453 | end: | ||
454 | return 0; | ||
455 | } | 379 | } |
456 | 380 | ||
457 | /** | 381 | /** |
@@ -462,29 +386,9 @@ end: | |||
462 | static void _dpu_plane_set_ot_limit(struct drm_plane *plane, | 386 | static void _dpu_plane_set_ot_limit(struct drm_plane *plane, |
463 | struct drm_crtc *crtc) | 387 | struct drm_crtc *crtc) |
464 | { | 388 | { |
465 | struct dpu_plane *pdpu; | 389 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
466 | struct dpu_vbif_set_ot_params ot_params; | 390 | struct dpu_vbif_set_ot_params ot_params; |
467 | struct msm_drm_private *priv; | 391 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); |
468 | struct dpu_kms *dpu_kms; | ||
469 | |||
470 | if (!plane || !plane->dev || !crtc) { | ||
471 | DPU_ERROR("invalid arguments plane %d crtc %d\n", | ||
472 | plane != 0, crtc != 0); | ||
473 | return; | ||
474 | } | ||
475 | |||
476 | priv = plane->dev->dev_private; | ||
477 | if (!priv || !priv->kms) { | ||
478 | DPU_ERROR("invalid KMS reference\n"); | ||
479 | return; | ||
480 | } | ||
481 | |||
482 | dpu_kms = to_dpu_kms(priv->kms); | ||
483 | pdpu = to_dpu_plane(plane); | ||
484 | if (!pdpu->pipe_hw) { | ||
485 | DPU_ERROR("invalid pipe reference\n"); | ||
486 | return; | ||
487 | } | ||
488 | 392 | ||
489 | memset(&ot_params, 0, sizeof(ot_params)); | 393 | memset(&ot_params, 0, sizeof(ot_params)); |
490 | ot_params.xin_id = pdpu->pipe_hw->cap->xin_id; | 394 | ot_params.xin_id = pdpu->pipe_hw->cap->xin_id; |
@@ -506,28 +410,9 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane, | |||
506 | */ | 410 | */ |
507 | static void _dpu_plane_set_qos_remap(struct drm_plane *plane) | 411 | static void _dpu_plane_set_qos_remap(struct drm_plane *plane) |
508 | { | 412 | { |
509 | struct dpu_plane *pdpu; | 413 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
510 | struct dpu_vbif_set_qos_params qos_params; | 414 | struct dpu_vbif_set_qos_params qos_params; |
511 | struct msm_drm_private *priv; | 415 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); |
512 | struct dpu_kms *dpu_kms; | ||
513 | |||
514 | if (!plane || !plane->dev) { | ||
515 | DPU_ERROR("invalid arguments\n"); | ||
516 | return; | ||
517 | } | ||
518 | |||
519 | priv = plane->dev->dev_private; | ||
520 | if (!priv || !priv->kms) { | ||
521 | DPU_ERROR("invalid KMS reference\n"); | ||
522 | return; | ||
523 | } | ||
524 | |||
525 | dpu_kms = to_dpu_kms(priv->kms); | ||
526 | pdpu = to_dpu_plane(plane); | ||
527 | if (!pdpu->pipe_hw) { | ||
528 | DPU_ERROR("invalid pipe reference\n"); | ||
529 | return; | ||
530 | } | ||
531 | 416 | ||
532 | memset(&qos_params, 0, sizeof(qos_params)); | 417 | memset(&qos_params, 0, sizeof(qos_params)); |
533 | qos_params.vbif_idx = VBIF_RT; | 418 | qos_params.vbif_idx = VBIF_RT; |
@@ -548,27 +433,12 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane) | |||
548 | /** | 433 | /** |
549 | * _dpu_plane_get_aspace: gets the address space | 434 | * _dpu_plane_get_aspace: gets the address space |
550 | */ | 435 | */ |
551 | static int _dpu_plane_get_aspace( | 436 | static inline struct msm_gem_address_space *_dpu_plane_get_aspace( |
552 | struct dpu_plane *pdpu, | 437 | struct dpu_plane *pdpu) |
553 | struct dpu_plane_state *pstate, | ||
554 | struct msm_gem_address_space **aspace) | ||
555 | { | 438 | { |
556 | struct dpu_kms *kms; | 439 | struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); |
557 | 440 | ||
558 | if (!pdpu || !pstate || !aspace) { | 441 | return kms->base.aspace; |
559 | DPU_ERROR("invalid parameters\n"); | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | |||
563 | kms = _dpu_plane_get_kms(&pdpu->base); | ||
564 | if (!kms) { | ||
565 | DPU_ERROR("invalid kms\n"); | ||
566 | return -EINVAL; | ||
567 | } | ||
568 | |||
569 | *aspace = kms->base.aspace; | ||
570 | |||
571 | return 0; | ||
572 | } | 442 | } |
573 | 443 | ||
574 | static inline void _dpu_plane_set_scanout(struct drm_plane *plane, | 444 | static inline void _dpu_plane_set_scanout(struct drm_plane *plane, |
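With _dpu_plane_get_aspace() returning the address space directly, every caller collapses from an out-parameter plus error path into a single initializer. A before/after sketch of the call-site pattern, using only names visible in the hunks above:

/* before: out-parameter and an error path that could never trigger */
struct msm_gem_address_space *aspace = NULL;
int ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
if (ret)
	return;

/* after: direct lookup, no failure path to handle */
struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);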
@@ -576,29 +446,10 @@ static inline void _dpu_plane_set_scanout(struct drm_plane *plane, | |||
576 | struct dpu_hw_pipe_cfg *pipe_cfg, | 446 | struct dpu_hw_pipe_cfg *pipe_cfg, |
577 | struct drm_framebuffer *fb) | 447 | struct drm_framebuffer *fb) |
578 | { | 448 | { |
579 | struct dpu_plane *pdpu; | 449 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
580 | struct msm_gem_address_space *aspace = NULL; | 450 | struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu); |
581 | int ret; | 451 | int ret; |
582 | 452 | ||
583 | if (!plane || !pstate || !pipe_cfg || !fb) { | ||
584 | DPU_ERROR( | ||
585 | "invalid arg(s), plane %d state %d cfg %d fb %d\n", | ||
586 | plane != 0, pstate != 0, pipe_cfg != 0, fb != 0); | ||
587 | return; | ||
588 | } | ||
589 | |||
590 | pdpu = to_dpu_plane(plane); | ||
591 | if (!pdpu->pipe_hw) { | ||
592 | DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n"); | ||
593 | return; | ||
594 | } | ||
595 | |||
596 | ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace); | ||
597 | if (ret) { | ||
598 | DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret); | ||
599 | return; | ||
600 | } | ||
601 | |||
602 | ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout); | 453 | ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout); |
603 | if (ret == -EAGAIN) | 454 | if (ret == -EAGAIN) |
604 | DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n"); | 455 | DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n"); |
@@ -622,15 +473,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, | |||
622 | { | 473 | { |
623 | uint32_t i; | 474 | uint32_t i; |
624 | 475 | ||
625 | if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h || | ||
626 | !chroma_subsmpl_v) { | ||
627 | DPU_ERROR( | ||
628 | "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n", | ||
629 | !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h, | ||
630 | chroma_subsmpl_v); | ||
631 | return; | ||
632 | } | ||
633 | |||
634 | memset(scale_cfg, 0, sizeof(*scale_cfg)); | 476 | memset(scale_cfg, 0, sizeof(*scale_cfg)); |
635 | memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext)); | 477 | memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext)); |
636 | 478 | ||
@@ -734,17 +576,8 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu, | |||
734 | struct dpu_plane_state *pstate, | 576 | struct dpu_plane_state *pstate, |
735 | const struct dpu_format *fmt, bool color_fill) | 577 | const struct dpu_format *fmt, bool color_fill) |
736 | { | 578 | { |
737 | struct dpu_hw_pixel_ext *pe; | ||
738 | uint32_t chroma_subsmpl_h, chroma_subsmpl_v; | 579 | uint32_t chroma_subsmpl_h, chroma_subsmpl_v; |
739 | 580 | ||
740 | if (!pdpu || !fmt || !pstate) { | ||
741 | DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n", | ||
742 | pdpu != 0, fmt != 0, pstate != 0); | ||
743 | return; | ||
744 | } | ||
745 | |||
746 | pe = &pstate->pixel_ext; | ||
747 | |||
748 | /* don't chroma subsample if decimating */ | 581 | /* don't chroma subsample if decimating */ |
749 | chroma_subsmpl_h = | 582 | chroma_subsmpl_h = |
750 | drm_format_horz_chroma_subsampling(fmt->base.pixel_format); | 583 | drm_format_horz_chroma_subsampling(fmt->base.pixel_format); |
@@ -772,21 +605,8 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu, | |||
772 | uint32_t color, uint32_t alpha) | 605 | uint32_t color, uint32_t alpha) |
773 | { | 606 | { |
774 | const struct dpu_format *fmt; | 607 | const struct dpu_format *fmt; |
775 | const struct drm_plane *plane; | 608 | const struct drm_plane *plane = &pdpu->base; |
776 | struct dpu_plane_state *pstate; | 609 | struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state); |
777 | |||
778 | if (!pdpu || !pdpu->base.state) { | ||
779 | DPU_ERROR("invalid plane\n"); | ||
780 | return -EINVAL; | ||
781 | } | ||
782 | |||
783 | if (!pdpu->pipe_hw) { | ||
784 | DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n"); | ||
785 | return -EINVAL; | ||
786 | } | ||
787 | |||
788 | plane = &pdpu->base; | ||
789 | pstate = to_dpu_plane_state(plane->state); | ||
790 | 610 | ||
791 | DPU_DEBUG_PLANE(pdpu, "\n"); | 611 | DPU_DEBUG_PLANE(pdpu, "\n"); |
792 | 612 | ||
@@ -837,12 +657,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu, | |||
837 | 657 | ||
838 | void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state) | 658 | void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state) |
839 | { | 659 | { |
840 | struct dpu_plane_state *pstate; | 660 | struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state); |
841 | |||
842 | if (!drm_state) | ||
843 | return; | ||
844 | |||
845 | pstate = to_dpu_plane_state(drm_state); | ||
846 | 661 | ||
847 | pstate->multirect_index = DPU_SSPP_RECT_SOLO; | 662 | pstate->multirect_index = DPU_SSPP_RECT_SOLO; |
848 | pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE; | 663 | pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE; |
@@ -973,15 +788,6 @@ done: | |||
973 | void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, | 788 | void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, |
974 | u32 *flush_sspp) | 789 | u32 *flush_sspp) |
975 | { | 790 | { |
976 | struct dpu_plane_state *pstate; | ||
977 | |||
978 | if (!plane || !flush_sspp) { | ||
979 | DPU_ERROR("invalid parameters\n"); | ||
980 | return; | ||
981 | } | ||
982 | |||
983 | pstate = to_dpu_plane_state(plane->state); | ||
984 | |||
985 | *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane)); | 791 | *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane)); |
986 | } | 792 | } |
987 | 793 | ||
@@ -995,7 +801,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, | |||
995 | struct drm_gem_object *obj; | 801 | struct drm_gem_object *obj; |
996 | struct msm_gem_object *msm_obj; | 802 | struct msm_gem_object *msm_obj; |
997 | struct dma_fence *fence; | 803 | struct dma_fence *fence; |
998 | struct msm_gem_address_space *aspace; | 804 | struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu); |
999 | int ret; | 805 | int ret; |
1000 | 806 | ||
1001 | if (!new_state->fb) | 807 | if (!new_state->fb) |
@@ -1003,12 +809,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, | |||
1003 | 809 | ||
1004 | DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id); | 810 | DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id); |
1005 | 811 | ||
1006 | ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace); | ||
1007 | if (ret) { | ||
1008 | DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n"); | ||
1009 | return ret; | ||
1010 | } | ||
1011 | |||
1012 | /* cache aspace */ | 812 | /* cache aspace */ |
1013 | pstate->aspace = aspace; | 813 | pstate->aspace = aspace; |
1014 | 814 | ||
@@ -1078,33 +878,30 @@ static bool dpu_plane_validate_src(struct drm_rect *src, | |||
1078 | drm_rect_equals(fb_rect, src); | 878 | drm_rect_equals(fb_rect, src); |
1079 | } | 879 | } |
1080 | 880 | ||
1081 | static int dpu_plane_sspp_atomic_check(struct drm_plane *plane, | 881 | static int dpu_plane_atomic_check(struct drm_plane *plane, |
1082 | struct drm_plane_state *state) | 882 | struct drm_plane_state *state) |
1083 | { | 883 | { |
1084 | int ret = 0; | 884 | int ret = 0, min_scale; |
1085 | struct dpu_plane *pdpu; | 885 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
1086 | struct dpu_plane_state *pstate; | 886 | const struct drm_crtc_state *crtc_state = NULL; |
1087 | const struct dpu_format *fmt; | 887 | const struct dpu_format *fmt; |
1088 | struct drm_rect src, dst, fb_rect = { 0 }; | 888 | struct drm_rect src, dst, fb_rect = { 0 }; |
1089 | uint32_t max_upscale = 1, max_downscale = 1; | ||
1090 | uint32_t min_src_size, max_linewidth; | 889 | uint32_t min_src_size, max_linewidth; |
1091 | int hscale = 1, vscale = 1; | ||
1092 | 890 | ||
1093 | if (!plane || !state) { | 891 | if (state->crtc) |
1094 | DPU_ERROR("invalid arg(s), plane %d state %d\n", | 892 | crtc_state = drm_atomic_get_new_crtc_state(state->state, |
1095 | plane != 0, state != 0); | 893 | state->crtc); |
1096 | ret = -EINVAL; | ||
1097 | goto exit; | ||
1098 | } | ||
1099 | |||
1100 | pdpu = to_dpu_plane(plane); | ||
1101 | pstate = to_dpu_plane_state(state); | ||
1102 | 894 | ||
1103 | if (!pdpu->pipe_sblk) { | 895 | min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); |
1104 | DPU_ERROR_PLANE(pdpu, "invalid catalog\n"); | 896 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, |
1105 | ret = -EINVAL; | 897 | pdpu->pipe_sblk->maxupscale << 16, |
1106 | goto exit; | 898 | true, true); |
899 | if (ret) { | ||
900 | DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); | ||
901 | return ret; | ||
1107 | } | 902 | } |
903 | if (!state->visible) | ||
904 | return 0; | ||
1108 | 905 | ||
1109 | src.x1 = state->src_x >> 16; | 906 | src.x1 = state->src_x >> 16; |
1110 | src.y1 = state->src_y >> 16; | 907 | src.y1 = state->src_y >> 16; |
@@ -1118,25 +915,6 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane, | |||
1118 | 915 | ||
1119 | max_linewidth = pdpu->pipe_sblk->common->maxlinewidth; | 916 | max_linewidth = pdpu->pipe_sblk->common->maxlinewidth; |
1120 | 917 | ||
1121 | if (pdpu->features & DPU_SSPP_SCALER) { | ||
1122 | max_downscale = pdpu->pipe_sblk->maxdwnscale; | ||
1123 | max_upscale = pdpu->pipe_sblk->maxupscale; | ||
1124 | } | ||
1125 | if (drm_rect_width(&src) < drm_rect_width(&dst)) | ||
1126 | hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale); | ||
1127 | else | ||
1128 | hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale); | ||
1129 | if (drm_rect_height(&src) < drm_rect_height(&dst)) | ||
1130 | vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale); | ||
1131 | else | ||
1132 | vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale); | ||
1133 | |||
1134 | DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n", | ||
1135 | dpu_plane_enabled(plane->state), dpu_plane_enabled(state)); | ||
1136 | |||
1137 | if (!dpu_plane_enabled(state)) | ||
1138 | goto exit; | ||
1139 | |||
1140 | fmt = to_dpu_format(msm_framebuffer_format(state->fb)); | 918 | fmt = to_dpu_format(msm_framebuffer_format(state->fb)); |
1141 | 919 | ||
1142 | min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1; | 920 | min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1; |
@@ -1147,13 +925,13 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane, | |||
1147 | | BIT(DPU_SSPP_CSC_10BIT))))) { | 925 | | BIT(DPU_SSPP_CSC_10BIT))))) { |
1148 | DPU_ERROR_PLANE(pdpu, | 926 | DPU_ERROR_PLANE(pdpu, |
1149 | "plane doesn't have scaler/csc for yuv\n"); | 927 | "plane doesn't have scaler/csc for yuv\n"); |
1150 | ret = -EINVAL; | 928 | return -EINVAL; |
1151 | 929 | ||
1152 | /* check src bounds */ | 930 | /* check src bounds */ |
1153 | } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) { | 931 | } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) { |
1154 | DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", | 932 | DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", |
1155 | DRM_RECT_ARG(&src)); | 933 | DRM_RECT_ARG(&src)); |
1156 | ret = -E2BIG; | 934 | return -E2BIG; |
1157 | 935 | ||
1158 | /* valid yuv image */ | 936 | /* valid yuv image */ |
1159 | } else if (DPU_FORMAT_IS_YUV(fmt) && | 937 | } else if (DPU_FORMAT_IS_YUV(fmt) && |
@@ -1162,41 +940,22 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane, | |||
1162 | drm_rect_height(&src) & 0x1)) { | 940 | drm_rect_height(&src) & 0x1)) { |
1163 | DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", | 941 | DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", |
1164 | DRM_RECT_ARG(&src)); | 942 | DRM_RECT_ARG(&src)); |
1165 | ret = -EINVAL; | 943 | return -EINVAL; |
1166 | 944 | ||
1167 | /* min dst support */ | 945 | /* min dst support */ |
1168 | } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) { | 946 | } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) { |
1169 | DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", | 947 | DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", |
1170 | DRM_RECT_ARG(&dst)); | 948 | DRM_RECT_ARG(&dst)); |
1171 | ret = -EINVAL; | 949 | return -EINVAL; |
1172 | 950 | ||
1173 | /* check decimated source width */ | 951 | /* check decimated source width */ |
1174 | } else if (drm_rect_width(&src) > max_linewidth) { | 952 | } else if (drm_rect_width(&src) > max_linewidth) { |
1175 | DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", | 953 | DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", |
1176 | DRM_RECT_ARG(&src), max_linewidth); | 954 | DRM_RECT_ARG(&src), max_linewidth); |
1177 | ret = -E2BIG; | 955 | return -E2BIG; |
1178 | |||
1179 | /* check scaler capability */ | ||
1180 | } else if (hscale < 0 || vscale < 0) { | ||
1181 | DPU_ERROR_PLANE(pdpu, "invalid scaling requested src=" | ||
1182 | DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n", | ||
1183 | DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst)); | ||
1184 | ret = -E2BIG; | ||
1185 | } | 956 | } |
1186 | 957 | ||
1187 | exit: | 958 | return 0; |
1188 | return ret; | ||
1189 | } | ||
1190 | |||
1191 | static int dpu_plane_atomic_check(struct drm_plane *plane, | ||
1192 | struct drm_plane_state *state) | ||
1193 | { | ||
1194 | if (!state->fb) | ||
1195 | return 0; | ||
1196 | |||
1197 | DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n"); | ||
1198 | |||
1199 | return dpu_plane_sspp_atomic_check(plane, state); | ||
1200 | } | 959 | } |
1201 | 960 | ||
1202 | void dpu_plane_flush(struct drm_plane *plane) | 961 | void dpu_plane_flush(struct drm_plane *plane) |
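The open-coded hscale/vscale bounds checks are replaced by drm_atomic_helper_check_plane_state(), which expects the scaling limits as 16.16 fixed-point ratios and also fills state->visible, the flag that the early return above and dpu_plane_atomic_update() below now key off. A small standalone sketch of the fixed-point arithmetic; the macro is a local stand-in for the driver's FRAC_16_16() (assumed to build (mult << 16) / div), and the catalog limits maxdwnscale = 4, maxupscale = 20 are hypothetical values, not taken from this diff:

#include <stdio.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

int main(void)
{
	int maxdwnscale = 4;	/* hypothetical sspp catalog limit */
	int maxupscale = 20;	/* hypothetical sspp catalog limit */

	int min_scale = FRAC_16_16(1, maxdwnscale);	/* 65536 / 4  = 16384 */
	int max_scale = maxupscale << 16;		/* 20 * 65536 = 1310720 */

	printf("min_scale = %d, max_scale = %d\n", min_scale, max_scale);
	return 0;
}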
@@ -1245,46 +1004,16 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error) | |||
1245 | pdpu->is_error = error; | 1004 | pdpu->is_error = error; |
1246 | } | 1005 | } |
1247 | 1006 | ||
1248 | static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, | 1007 | static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) |
1249 | struct drm_plane_state *old_state) | ||
1250 | { | 1008 | { |
1251 | uint32_t nplanes, src_flags; | 1009 | uint32_t src_flags; |
1252 | struct dpu_plane *pdpu; | 1010 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
1253 | struct drm_plane_state *state; | 1011 | struct drm_plane_state *state = plane->state; |
1254 | struct dpu_plane_state *pstate; | 1012 | struct dpu_plane_state *pstate = to_dpu_plane_state(state); |
1255 | struct dpu_plane_state *old_pstate; | 1013 | struct drm_crtc *crtc = state->crtc; |
1256 | const struct dpu_format *fmt; | 1014 | struct drm_framebuffer *fb = state->fb; |
1257 | struct drm_crtc *crtc; | 1015 | const struct dpu_format *fmt = |
1258 | struct drm_framebuffer *fb; | 1016 | to_dpu_format(msm_framebuffer_format(fb)); |
1259 | struct drm_rect src, dst; | ||
1260 | |||
1261 | if (!plane) { | ||
1262 | DPU_ERROR("invalid plane\n"); | ||
1263 | return -EINVAL; | ||
1264 | } else if (!plane->state) { | ||
1265 | DPU_ERROR("invalid plane state\n"); | ||
1266 | return -EINVAL; | ||
1267 | } else if (!old_state) { | ||
1268 | DPU_ERROR("invalid old state\n"); | ||
1269 | return -EINVAL; | ||
1270 | } | ||
1271 | |||
1272 | pdpu = to_dpu_plane(plane); | ||
1273 | state = plane->state; | ||
1274 | |||
1275 | pstate = to_dpu_plane_state(state); | ||
1276 | |||
1277 | old_pstate = to_dpu_plane_state(old_state); | ||
1278 | |||
1279 | crtc = state->crtc; | ||
1280 | fb = state->fb; | ||
1281 | if (!crtc || !fb) { | ||
1282 | DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n", | ||
1283 | crtc != 0, fb != 0); | ||
1284 | return -EINVAL; | ||
1285 | } | ||
1286 | fmt = to_dpu_format(msm_framebuffer_format(fb)); | ||
1287 | nplanes = fmt->num_planes; | ||
1288 | 1017 | ||
1289 | memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg)); | 1018 | memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg)); |
1290 | 1019 | ||
@@ -1295,28 +1024,27 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, | |||
1295 | pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); | 1024 | pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); |
1296 | _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); | 1025 | _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); |
1297 | 1026 | ||
1298 | src.x1 = state->src_x >> 16; | 1027 | DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT |
1299 | src.y1 = state->src_y >> 16; | 1028 | ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), |
1300 | src.x2 = src.x1 + (state->src_w >> 16); | 1029 | crtc->base.id, DRM_RECT_ARG(&state->dst), |
1301 | src.y2 = src.y1 + (state->src_h >> 16); | 1030 | (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); |
1302 | 1031 | ||
1303 | dst = drm_plane_state_dest(state); | 1032 | pdpu->pipe_cfg.src_rect = state->src; |
1304 | 1033 | ||
1305 | DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT | 1034 | /* state->src is 16.16, src_rect is not */ |
1306 | ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src), | 1035 | pdpu->pipe_cfg.src_rect.x1 >>= 16; |
1307 | crtc->base.id, DRM_RECT_ARG(&dst), | 1036 | pdpu->pipe_cfg.src_rect.x2 >>= 16; |
1308 | (char *)&fmt->base.pixel_format, | 1037 | pdpu->pipe_cfg.src_rect.y1 >>= 16; |
1309 | DPU_FORMAT_IS_UBWC(fmt)); | 1038 | pdpu->pipe_cfg.src_rect.y2 >>= 16; |
1310 | 1039 | ||
1311 | pdpu->pipe_cfg.src_rect = src; | 1040 | pdpu->pipe_cfg.dst_rect = state->dst; |
1312 | pdpu->pipe_cfg.dst_rect = dst; | ||
1313 | 1041 | ||
1314 | _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); | 1042 | _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); |
1315 | 1043 | ||
1316 | /* override for color fill */ | 1044 | /* override for color fill */ |
1317 | if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) { | 1045 | if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) { |
1318 | /* skip remaining processing on color fill */ | 1046 | /* skip remaining processing on color fill */ |
1319 | return 0; | 1047 | return; |
1320 | } | 1048 | } |
1321 | 1049 | ||
1322 | if (pdpu->pipe_hw->ops.setup_rects) { | 1050 | if (pdpu->pipe_hw->ops.setup_rects) { |
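drm_atomic_helper_check_plane_state() leaves state->src as the clipped source rectangle in 16.16 fixed point, while dpu_hw_pipe_cfg wants whole pixels, hence the four shifts; state->dst is already integer and is copied as-is. A tiny runnable illustration of what one of those shifts does (the 1920 value is just an example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t x2_fixed = 1920 << 16;		/* 1920.0 in 16.16, i.e. 0x07800000 */
	int32_t x2_pixels = x2_fixed >> 16;	/* back to whole pixels, fraction truncated */

	printf("%d -> %d\n", x2_fixed, x2_pixels);	/* 125829120 -> 1920 */
	return 0;
}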
@@ -1387,30 +1115,13 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, | |||
1387 | } | 1115 | } |
1388 | 1116 | ||
1389 | _dpu_plane_set_qos_remap(plane); | 1117 | _dpu_plane_set_qos_remap(plane); |
1390 | return 0; | ||
1391 | } | 1118 | } |
1392 | 1119 | ||
1393 | static void _dpu_plane_atomic_disable(struct drm_plane *plane, | 1120 | static void _dpu_plane_atomic_disable(struct drm_plane *plane) |
1394 | struct drm_plane_state *old_state) | ||
1395 | { | 1121 | { |
1396 | struct dpu_plane *pdpu; | 1122 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
1397 | struct drm_plane_state *state; | 1123 | struct drm_plane_state *state = plane->state; |
1398 | struct dpu_plane_state *pstate; | 1124 | struct dpu_plane_state *pstate = to_dpu_plane_state(state); |
1399 | |||
1400 | if (!plane) { | ||
1401 | DPU_ERROR("invalid plane\n"); | ||
1402 | return; | ||
1403 | } else if (!plane->state) { | ||
1404 | DPU_ERROR("invalid plane state\n"); | ||
1405 | return; | ||
1406 | } else if (!old_state) { | ||
1407 | DPU_ERROR("invalid old state\n"); | ||
1408 | return; | ||
1409 | } | ||
1410 | |||
1411 | pdpu = to_dpu_plane(plane); | ||
1412 | state = plane->state; | ||
1413 | pstate = to_dpu_plane_state(state); | ||
1414 | 1125 | ||
1415 | trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane), | 1126 | trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane), |
1416 | pstate->multirect_mode); | 1127 | pstate->multirect_mode); |
@@ -1426,31 +1137,17 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane, | |||
1426 | static void dpu_plane_atomic_update(struct drm_plane *plane, | 1137 | static void dpu_plane_atomic_update(struct drm_plane *plane, |
1427 | struct drm_plane_state *old_state) | 1138 | struct drm_plane_state *old_state) |
1428 | { | 1139 | { |
1429 | struct dpu_plane *pdpu; | 1140 | struct dpu_plane *pdpu = to_dpu_plane(plane); |
1430 | struct drm_plane_state *state; | 1141 | struct drm_plane_state *state = plane->state; |
1431 | |||
1432 | if (!plane) { | ||
1433 | DPU_ERROR("invalid plane\n"); | ||
1434 | return; | ||
1435 | } else if (!plane->state) { | ||
1436 | DPU_ERROR("invalid plane state\n"); | ||
1437 | return; | ||
1438 | } | ||
1439 | 1142 | ||
1440 | pdpu = to_dpu_plane(plane); | ||
1441 | pdpu->is_error = false; | 1143 | pdpu->is_error = false; |
1442 | state = plane->state; | ||
1443 | 1144 | ||
1444 | DPU_DEBUG_PLANE(pdpu, "\n"); | 1145 | DPU_DEBUG_PLANE(pdpu, "\n"); |
1445 | 1146 | ||
1446 | if (!dpu_plane_sspp_enabled(state)) { | 1147 | if (!state->visible) { |
1447 | _dpu_plane_atomic_disable(plane, old_state); | 1148 | _dpu_plane_atomic_disable(plane); |
1448 | } else { | 1149 | } else { |
1449 | int ret; | 1150 | dpu_plane_sspp_atomic_update(plane); |
1450 | |||
1451 | ret = dpu_plane_sspp_atomic_update(plane, old_state); | ||
1452 | /* atomic_check should have ensured that this doesn't fail */ | ||
1453 | WARN_ON(ret < 0); | ||
1454 | } | 1151 | } |
1455 | } | 1152 | } |
1456 | 1153 | ||
@@ -1487,8 +1184,7 @@ static void dpu_plane_destroy(struct drm_plane *plane) | |||
1487 | /* this will destroy the states as well */ | 1184 | /* this will destroy the states as well */ |
1488 | drm_plane_cleanup(plane); | 1185 | drm_plane_cleanup(plane); |
1489 | 1186 | ||
1490 | if (pdpu->pipe_hw) | 1187 | dpu_hw_sspp_destroy(pdpu->pipe_hw); |
1491 | dpu_hw_sspp_destroy(pdpu->pipe_hw); | ||
1492 | 1188 | ||
1493 | kfree(pdpu); | 1189 | kfree(pdpu); |
1494 | } | 1190 | } |
@@ -1507,9 +1203,7 @@ static void dpu_plane_destroy_state(struct drm_plane *plane, | |||
1507 | 1203 | ||
1508 | pstate = to_dpu_plane_state(state); | 1204 | pstate = to_dpu_plane_state(state); |
1509 | 1205 | ||
1510 | /* remove ref count for frame buffers */ | 1206 | __drm_atomic_helper_plane_destroy_state(state); |
1511 | if (state->fb) | ||
1512 | drm_framebuffer_put(state->fb); | ||
1513 | 1207 | ||
1514 | kfree(pstate); | 1208 | kfree(pstate); |
1515 | } | 1209 | } |
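The manual drm_framebuffer_put() is redundant once the destroy path calls __drm_atomic_helper_plane_destroy_state(), which already drops the framebuffer (and fence) references held by a plane state. Roughly, paraphrasing the DRM core helper rather than anything in this driver:

void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
	if (state->fb)
		drm_framebuffer_put(state->fb);		/* the put removed above */

	if (state->fence)
		dma_fence_put(state->fence);
}

Keeping the explicit put alongside the helper would have dropped the framebuffer reference twice.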
@@ -1829,40 +1523,17 @@ bool is_dpu_plane_virtual(struct drm_plane *plane) | |||
1829 | 1523 | ||
1830 | /* initialize plane */ | 1524 | /* initialize plane */ |
1831 | struct drm_plane *dpu_plane_init(struct drm_device *dev, | 1525 | struct drm_plane *dpu_plane_init(struct drm_device *dev, |
1832 | uint32_t pipe, bool primary_plane, | 1526 | uint32_t pipe, enum drm_plane_type type, |
1833 | unsigned long possible_crtcs, u32 master_plane_id) | 1527 | unsigned long possible_crtcs, u32 master_plane_id) |
1834 | { | 1528 | { |
1835 | struct drm_plane *plane = NULL, *master_plane = NULL; | 1529 | struct drm_plane *plane = NULL, *master_plane = NULL; |
1836 | const struct dpu_format_extended *format_list; | 1530 | const struct dpu_format_extended *format_list; |
1837 | struct dpu_plane *pdpu; | 1531 | struct dpu_plane *pdpu; |
1838 | struct msm_drm_private *priv; | 1532 | struct msm_drm_private *priv = dev->dev_private; |
1839 | struct dpu_kms *kms; | 1533 | struct dpu_kms *kms = to_dpu_kms(priv->kms); |
1840 | enum drm_plane_type type; | ||
1841 | int zpos_max = DPU_ZPOS_MAX; | 1534 | int zpos_max = DPU_ZPOS_MAX; |
1842 | int ret = -EINVAL; | 1535 | int ret = -EINVAL; |
1843 | 1536 | ||
1844 | if (!dev) { | ||
1845 | DPU_ERROR("[%u]device is NULL\n", pipe); | ||
1846 | goto exit; | ||
1847 | } | ||
1848 | |||
1849 | priv = dev->dev_private; | ||
1850 | if (!priv) { | ||
1851 | DPU_ERROR("[%u]private data is NULL\n", pipe); | ||
1852 | goto exit; | ||
1853 | } | ||
1854 | |||
1855 | if (!priv->kms) { | ||
1856 | DPU_ERROR("[%u]invalid KMS reference\n", pipe); | ||
1857 | goto exit; | ||
1858 | } | ||
1859 | kms = to_dpu_kms(priv->kms); | ||
1860 | |||
1861 | if (!kms->catalog) { | ||
1862 | DPU_ERROR("[%u]invalid catalog reference\n", pipe); | ||
1863 | goto exit; | ||
1864 | } | ||
1865 | |||
1866 | /* create and zero local structure */ | 1537 | /* create and zero local structure */ |
1867 | pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL); | 1538 | pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL); |
1868 | if (!pdpu) { | 1539 | if (!pdpu) { |
@@ -1918,12 +1589,6 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, | |||
1918 | goto clean_sspp; | 1589 | goto clean_sspp; |
1919 | } | 1590 | } |
1920 | 1591 | ||
1921 | if (pdpu->features & BIT(DPU_SSPP_CURSOR)) | ||
1922 | type = DRM_PLANE_TYPE_CURSOR; | ||
1923 | else if (primary_plane) | ||
1924 | type = DRM_PLANE_TYPE_PRIMARY; | ||
1925 | else | ||
1926 | type = DRM_PLANE_TYPE_OVERLAY; | ||
1927 | ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, | 1592 | ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, |
1928 | pdpu->formats, pdpu->nformats, | 1593 | pdpu->formats, pdpu->nformats, |
1929 | NULL, type, NULL); | 1594 | NULL, type, NULL); |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index f6fe6ddc7a3a..7fed0b627708 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | |||
@@ -122,7 +122,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error); | |||
122 | * dpu_plane_init - create new dpu plane for the given pipe | 122 | * dpu_plane_init - create new dpu plane for the given pipe |
123 | * @dev: Pointer to DRM device | 123 | * @dev: Pointer to DRM device |
124 | * @pipe: dpu hardware pipe identifier | 124 | * @pipe: dpu hardware pipe identifier |
125 | * @primary_plane: true if this pipe is primary plane for crtc | 125 | * @type: Plane type - PRIMARY/OVERLAY/CURSOR |
126 | * @possible_crtcs: bitmask of crtc that can be attached to the given pipe | 126 | * @possible_crtcs: bitmask of crtc that can be attached to the given pipe |
127 | * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for | 127 | * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for |
128 | * a regular plane initialization. A non-zero primary plane | 128 | * a regular plane initialization. A non-zero primary plane |
@@ -130,7 +130,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error); | |||
130 | * | 130 | * |
131 | */ | 131 | */ |
132 | struct drm_plane *dpu_plane_init(struct drm_device *dev, | 132 | struct drm_plane *dpu_plane_init(struct drm_device *dev, |
133 | uint32_t pipe, bool primary_plane, | 133 | uint32_t pipe, enum drm_plane_type type, |
134 | unsigned long possible_crtcs, u32 master_plane_id); | 134 | unsigned long possible_crtcs, u32 master_plane_id); |
135 | 135 | ||
136 | /** | 136 | /** |
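Since dpu_plane_init() no longer infers the plane type, the cursor/primary/overlay decision moves to the caller in dpu_kms.c. A hedged sketch of what that call site looks like after this change; the loop index and primary_planes_idx bookkeeping are assumptions for illustration, only the dpu_plane_init() signature comes from this diff:

enum drm_plane_type type;

if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
	type = DRM_PLANE_TYPE_CURSOR;
else if (primary_planes_idx < max_crtc_count)
	type = DRM_PLANE_TYPE_PRIMARY;
else
	type = DRM_PLANE_TYPE_OVERLAY;

plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
		       (1UL << max_crtc_count) - 1, 0);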
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c index a75eebca2f37..fc14116789f2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c | |||
@@ -145,6 +145,7 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle, | |||
145 | bool changed = false; | 145 | bool changed = false; |
146 | u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx; | 146 | u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx; |
147 | struct dpu_power_client *client; | 147 | struct dpu_power_client *client; |
148 | u32 event_type; | ||
148 | 149 | ||
149 | if (!phandle || !pclient) { | 150 | if (!phandle || !pclient) { |
150 | pr_err("invalid input argument\n"); | 151 | pr_err("invalid input argument\n"); |
@@ -181,19 +182,9 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle, | |||
181 | if (!changed) | 182 | if (!changed) |
182 | goto end; | 183 | goto end; |
183 | 184 | ||
184 | if (enable) { | 185 | event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE; |
185 | dpu_power_event_trigger_locked(phandle, | ||
186 | DPU_POWER_EVENT_PRE_ENABLE); | ||
187 | dpu_power_event_trigger_locked(phandle, | ||
188 | DPU_POWER_EVENT_POST_ENABLE); | ||
189 | |||
190 | } else { | ||
191 | dpu_power_event_trigger_locked(phandle, | ||
192 | DPU_POWER_EVENT_PRE_DISABLE); | ||
193 | dpu_power_event_trigger_locked(phandle, | ||
194 | DPU_POWER_EVENT_POST_DISABLE); | ||
195 | } | ||
196 | 186 | ||
187 | dpu_power_event_trigger_locked(phandle, event_type); | ||
197 | end: | 188 | end: |
198 | mutex_unlock(&phandle->phandle_lock); | 189 | mutex_unlock(&phandle->phandle_lock); |
199 | return 0; | 190 | return 0; |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h index 344f74464eca..a65b7a297f21 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h | |||
@@ -23,17 +23,9 @@ | |||
23 | 23 | ||
24 | #include "dpu_io_util.h" | 24 | #include "dpu_io_util.h" |
25 | 25 | ||
26 | /* event will be triggered before power handler disable */ | 26 | /* events will be triggered on power handler enable/disable */ |
27 | #define DPU_POWER_EVENT_PRE_DISABLE 0x1 | 27 | #define DPU_POWER_EVENT_DISABLE BIT(0) |
28 | 28 | #define DPU_POWER_EVENT_ENABLE BIT(1) | |
29 | /* event will be triggered after power handler disable */ | ||
30 | #define DPU_POWER_EVENT_POST_DISABLE 0x2 | ||
31 | |||
32 | /* event will be triggered before power handler enable */ | ||
33 | #define DPU_POWER_EVENT_PRE_ENABLE 0x4 | ||
34 | |||
35 | /* event will be triggered after power handler enable */ | ||
36 | #define DPU_POWER_EVENT_POST_ENABLE 0x8 | ||
37 | 29 | ||
38 | /** | 30 | /** |
39 | * mdss_bus_vote_type: register bus vote type | 31 | * mdss_bus_vote_type: register bus vote type |
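The four PRE/POST event bits collapse into a single notification per transition, fired once the usecase vote has actually changed. A hypothetical handler body showing how a client would tell the two remaining events apart; the callback signature is an assumption for illustration, not something this diff defines:

static void my_power_event_cb(u32 event_type, void *usr)
{
	switch (event_type) {
	case DPU_POWER_EVENT_ENABLE:	/* BIT(1): resources are now powered */
		/* safe to touch hardware again */
		break;
	case DPU_POWER_EVENT_DISABLE:	/* BIT(0): resources are going away */
		/* stop register access */
		break;
	}
}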
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 13c0a36d4ef9..bdb117709674 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include "dpu_kms.h" | 16 | #include "dpu_kms.h" |
17 | #include "dpu_hw_lm.h" | 17 | #include "dpu_hw_lm.h" |
18 | #include "dpu_hw_ctl.h" | 18 | #include "dpu_hw_ctl.h" |
19 | #include "dpu_hw_cdm.h" | ||
20 | #include "dpu_hw_pingpong.h" | 19 | #include "dpu_hw_pingpong.h" |
21 | #include "dpu_hw_intf.h" | 20 | #include "dpu_hw_intf.h" |
22 | #include "dpu_encoder.h" | 21 | #include "dpu_encoder.h" |
@@ -25,38 +24,13 @@ | |||
25 | #define RESERVED_BY_OTHER(h, r) \ | 24 | #define RESERVED_BY_OTHER(h, r) \ |
26 | ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) | 25 | ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) |
27 | 26 | ||
28 | #define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK)) | ||
29 | #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR)) | ||
30 | #define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS)) | ||
31 | #define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \ | ||
32 | (t).num_comp_enc == (r).num_enc && \ | ||
33 | (t).num_intf == (r).num_intf) | ||
34 | |||
35 | struct dpu_rm_topology_def { | ||
36 | enum dpu_rm_topology_name top_name; | ||
37 | int num_lm; | ||
38 | int num_comp_enc; | ||
39 | int num_intf; | ||
40 | int num_ctl; | ||
41 | int needs_split_display; | ||
42 | }; | ||
43 | |||
44 | static const struct dpu_rm_topology_def g_top_table[] = { | ||
45 | { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false }, | ||
46 | { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false }, | ||
47 | { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true }, | ||
48 | { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false }, | ||
49 | }; | ||
50 | |||
51 | /** | 27 | /** |
52 | * struct dpu_rm_requirements - Reservation requirements parameter bundle | 28 | * struct dpu_rm_requirements - Reservation requirements parameter bundle |
53 | * @top_ctrl: topology control preference from kernel client | 29 | * @topology: selected topology for the display |
54 | * @top: selected topology for the display | ||
55 | * @hw_res: Hardware resources required as reported by the encoders | 30 | * @hw_res: Hardware resources required as reported by the encoders |
56 | */ | 31 | */ |
57 | struct dpu_rm_requirements { | 32 | struct dpu_rm_requirements { |
58 | uint64_t top_ctrl; | 33 | struct msm_display_topology topology; |
59 | const struct dpu_rm_topology_def *topology; | ||
60 | struct dpu_encoder_hw_resources hw_res; | 34 | struct dpu_encoder_hw_resources hw_res; |
61 | }; | 35 | }; |
62 | 36 | ||
@@ -72,13 +46,11 @@ struct dpu_rm_requirements { | |||
72 | * @enc_id: Reservations are tracked by Encoder DRM object ID. | 46 | * @enc_id: Reservations are tracked by Encoder DRM object ID. |
73 | * CRTCs may be connected to multiple Encoders. | 47 | * CRTCs may be connected to multiple Encoders. |
74 | * An encoder or connector id identifies the display path. | 48 | * An encoder or connector id identifies the display path. |
75 | * @topology DRM<->HW topology use case | ||
76 | */ | 49 | */ |
77 | struct dpu_rm_rsvp { | 50 | struct dpu_rm_rsvp { |
78 | struct list_head list; | 51 | struct list_head list; |
79 | uint32_t seq; | 52 | uint32_t seq; |
80 | uint32_t enc_id; | 53 | uint32_t enc_id; |
81 | enum dpu_rm_topology_name topology; | ||
82 | }; | 54 | }; |
83 | 55 | ||
84 | /** | 56 | /** |
@@ -122,8 +94,8 @@ static void _dpu_rm_print_rsvps( | |||
122 | DPU_DEBUG("%d\n", stage); | 94 | DPU_DEBUG("%d\n", stage); |
123 | 95 | ||
124 | list_for_each_entry(rsvp, &rm->rsvps, list) { | 96 | list_for_each_entry(rsvp, &rm->rsvps, list) { |
125 | DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq, | 97 | DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq, |
126 | rsvp->enc_id, rsvp->topology); | 98 | rsvp->enc_id); |
127 | } | 99 | } |
128 | 100 | ||
129 | for (type = 0; type < DPU_HW_BLK_MAX; type++) { | 101 | for (type = 0; type < DPU_HW_BLK_MAX; type++) { |
@@ -146,18 +118,6 @@ struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm) | |||
146 | return rm->hw_mdp; | 118 | return rm->hw_mdp; |
147 | } | 119 | } |
148 | 120 | ||
149 | enum dpu_rm_topology_name | ||
150 | dpu_rm_get_topology_name(struct msm_display_topology topology) | ||
151 | { | ||
152 | int i; | ||
153 | |||
154 | for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) | ||
155 | if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology)) | ||
156 | return g_top_table[i].top_name; | ||
157 | |||
158 | return DPU_RM_TOPOLOGY_NONE; | ||
159 | } | ||
160 | |||
161 | void dpu_rm_init_hw_iter( | 121 | void dpu_rm_init_hw_iter( |
162 | struct dpu_rm_hw_iter *iter, | 122 | struct dpu_rm_hw_iter *iter, |
163 | uint32_t enc_id, | 123 | uint32_t enc_id, |
@@ -229,9 +189,6 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) | |||
229 | case DPU_HW_BLK_CTL: | 189 | case DPU_HW_BLK_CTL: |
230 | dpu_hw_ctl_destroy(hw); | 190 | dpu_hw_ctl_destroy(hw); |
231 | break; | 191 | break; |
232 | case DPU_HW_BLK_CDM: | ||
233 | dpu_hw_cdm_destroy(hw); | ||
234 | break; | ||
235 | case DPU_HW_BLK_PINGPONG: | 192 | case DPU_HW_BLK_PINGPONG: |
236 | dpu_hw_pingpong_destroy(hw); | 193 | dpu_hw_pingpong_destroy(hw); |
237 | break; | 194 | break; |
@@ -305,9 +262,6 @@ static int _dpu_rm_hw_blk_create( | |||
305 | case DPU_HW_BLK_CTL: | 262 | case DPU_HW_BLK_CTL: |
306 | hw = dpu_hw_ctl_init(id, mmio, cat); | 263 | hw = dpu_hw_ctl_init(id, mmio, cat); |
307 | break; | 264 | break; |
308 | case DPU_HW_BLK_CDM: | ||
309 | hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp); | ||
310 | break; | ||
311 | case DPU_HW_BLK_PINGPONG: | 265 | case DPU_HW_BLK_PINGPONG: |
312 | hw = dpu_hw_pingpong_init(id, mmio, cat); | 266 | hw = dpu_hw_pingpong_init(id, mmio, cat); |
313 | break; | 267 | break; |
@@ -438,15 +392,6 @@ int dpu_rm_init(struct dpu_rm *rm, | |||
438 | } | 392 | } |
439 | } | 393 | } |
440 | 394 | ||
441 | for (i = 0; i < cat->cdm_count; i++) { | ||
442 | rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM, | ||
443 | cat->cdm[i].id, &cat->cdm[i]); | ||
444 | if (rc) { | ||
445 | DPU_ERROR("failed: cdm hw not available\n"); | ||
446 | goto fail; | ||
447 | } | ||
448 | } | ||
449 | |||
450 | return 0; | 395 | return 0; |
451 | 396 | ||
452 | fail: | 397 | fail: |
@@ -455,6 +400,11 @@ fail: | |||
455 | return rc; | 400 | return rc; |
456 | } | 401 | } |
457 | 402 | ||
403 | static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) | ||
404 | { | ||
405 | return top->num_intf > 1; | ||
406 | } | ||
407 | |||
458 | /** | 408 | /** |
459 | * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets | 409 | * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets |
460 | * proposed use case requirements, incl. hardwired dependent blocks like | 410 | * proposed use case requirements, incl. hardwired dependent blocks like |
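With the static g_top_table gone, the resource manager derives everything it needs straight from msm_display_topology: one CTL per interface, and split display whenever more than one interface is involved. A short sketch with hypothetical topologies (the field values are illustrative, not from this diff):

/* dual-DSI panel: two interfaces, two mixers */
struct msm_display_topology dual_dsi = { .num_lm = 2, .num_enc = 0, .num_intf = 2 };

/* single-DSI panel: one of each */
struct msm_display_topology single_dsi = { .num_lm = 1, .num_enc = 0, .num_intf = 1 };

int num_ctls = dual_dsi.num_intf;				/* 2: one CTL per INTF */
bool split = _dpu_rm_needs_split_display(&dual_dsi);		/* true: num_intf > 1 */
bool no_split = _dpu_rm_needs_split_display(&single_dsi);	/* false */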
@@ -538,14 +488,14 @@ static int _dpu_rm_reserve_lms( | |||
538 | int lm_count = 0; | 488 | int lm_count = 0; |
539 | int i, rc = 0; | 489 | int i, rc = 0; |
540 | 490 | ||
541 | if (!reqs->topology->num_lm) { | 491 | if (!reqs->topology.num_lm) { |
542 | DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm); | 492 | DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); |
543 | return -EINVAL; | 493 | return -EINVAL; |
544 | } | 494 | } |
545 | 495 | ||
546 | /* Find a primary mixer */ | 496 | /* Find a primary mixer */ |
547 | dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); | 497 | dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); |
548 | while (lm_count != reqs->topology->num_lm && | 498 | while (lm_count != reqs->topology.num_lm && |
549 | _dpu_rm_get_hw_locked(rm, &iter_i)) { | 499 | _dpu_rm_get_hw_locked(rm, &iter_i)) { |
550 | memset(&lm, 0, sizeof(lm)); | 500 | memset(&lm, 0, sizeof(lm)); |
551 | memset(&pp, 0, sizeof(pp)); | 501 | memset(&pp, 0, sizeof(pp)); |
@@ -563,7 +513,7 @@ static int _dpu_rm_reserve_lms( | |||
563 | /* Valid primary mixer found, find matching peers */ | 513 | /* Valid primary mixer found, find matching peers */ |
564 | dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); | 514 | dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); |
565 | 515 | ||
566 | while (lm_count != reqs->topology->num_lm && | 516 | while (lm_count != reqs->topology.num_lm && |
567 | _dpu_rm_get_hw_locked(rm, &iter_j)) { | 517 | _dpu_rm_get_hw_locked(rm, &iter_j)) { |
568 | if (iter_i.blk == iter_j.blk) | 518 | if (iter_i.blk == iter_j.blk) |
569 | continue; | 519 | continue; |
@@ -578,7 +528,7 @@ static int _dpu_rm_reserve_lms( | |||
578 | } | 528 | } |
579 | } | 529 | } |
580 | 530 | ||
581 | if (lm_count != reqs->topology->num_lm) { | 531 | if (lm_count != reqs->topology.num_lm) { |
582 | DPU_DEBUG("unable to find appropriate mixers\n"); | 532 | DPU_DEBUG("unable to find appropriate mixers\n"); |
583 | return -ENAVAIL; | 533 | return -ENAVAIL; |
584 | } | 534 | } |
@@ -600,14 +550,20 @@ static int _dpu_rm_reserve_lms( | |||
600 | static int _dpu_rm_reserve_ctls( | 550 | static int _dpu_rm_reserve_ctls( |
601 | struct dpu_rm *rm, | 551 | struct dpu_rm *rm, |
602 | struct dpu_rm_rsvp *rsvp, | 552 | struct dpu_rm_rsvp *rsvp, |
603 | const struct dpu_rm_topology_def *top) | 553 | const struct msm_display_topology *top) |
604 | { | 554 | { |
605 | struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; | 555 | struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; |
606 | struct dpu_rm_hw_iter iter; | 556 | struct dpu_rm_hw_iter iter; |
607 | int i = 0; | 557 | int i = 0, num_ctls = 0; |
558 | bool needs_split_display = false; | ||
608 | 559 | ||
609 | memset(&ctls, 0, sizeof(ctls)); | 560 | memset(&ctls, 0, sizeof(ctls)); |
610 | 561 | ||
562 | /* each hw_intf needs its own hw_ctrl to program its control path */ | ||
563 | num_ctls = top->num_intf; | ||
564 | |||
565 | needs_split_display = _dpu_rm_needs_split_display(top); | ||
566 | |||
611 | dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL); | 567 | dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL); |
612 | while (_dpu_rm_get_hw_locked(rm, &iter)) { | 568 | while (_dpu_rm_get_hw_locked(rm, &iter)) { |
613 | const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw); | 569 | const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw); |
@@ -621,20 +577,20 @@ static int _dpu_rm_reserve_ctls( | |||
621 | 577 | ||
622 | DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features); | 578 | DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features); |
623 | 579 | ||
624 | if (top->needs_split_display != has_split_display) | 580 | if (needs_split_display != has_split_display) |
625 | continue; | 581 | continue; |
626 | 582 | ||
627 | ctls[i] = iter.blk; | 583 | ctls[i] = iter.blk; |
628 | DPU_DEBUG("ctl %d match\n", iter.blk->id); | 584 | DPU_DEBUG("ctl %d match\n", iter.blk->id); |
629 | 585 | ||
630 | if (++i == top->num_ctl) | 586 | if (++i == num_ctls) |
631 | break; | 587 | break; |
632 | } | 588 | } |
633 | 589 | ||
634 | if (i != top->num_ctl) | 590 | if (i != num_ctls) |
635 | return -ENAVAIL; | 591 | return -ENAVAIL; |
636 | 592 | ||
637 | for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) { | 593 | for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { |
638 | ctls[i]->rsvp_nxt = rsvp; | 594 | ctls[i]->rsvp_nxt = rsvp; |
639 | trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type, | 595 | trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type, |
640 | rsvp->enc_id); | 596 | rsvp->enc_id); |
@@ -643,55 +599,11 @@ static int _dpu_rm_reserve_ctls( | |||
643 | return 0; | 599 | return 0; |
644 | } | 600 | } |
645 | 601 | ||
646 | static int _dpu_rm_reserve_cdm( | ||
647 | struct dpu_rm *rm, | ||
648 | struct dpu_rm_rsvp *rsvp, | ||
649 | uint32_t id, | ||
650 | enum dpu_hw_blk_type type) | ||
651 | { | ||
652 | struct dpu_rm_hw_iter iter; | ||
653 | |||
654 | DRM_DEBUG_KMS("type %d id %d\n", type, id); | ||
655 | |||
656 | dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM); | ||
657 | while (_dpu_rm_get_hw_locked(rm, &iter)) { | ||
658 | const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw); | ||
659 | const struct dpu_cdm_cfg *caps = cdm->caps; | ||
660 | bool match = false; | ||
661 | |||
662 | if (RESERVED_BY_OTHER(iter.blk, rsvp)) | ||
663 | continue; | ||
664 | |||
665 | if (type == DPU_HW_BLK_INTF && id != INTF_MAX) | ||
666 | match = test_bit(id, &caps->intf_connect); | ||
667 | |||
668 | DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n", | ||
669 | iter.blk->type, iter.blk->id, rsvp->enc_id, | ||
670 | caps->intf_connect, match); | ||
671 | |||
672 | if (!match) | ||
673 | continue; | ||
674 | |||
675 | trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type, | ||
676 | rsvp->enc_id); | ||
677 | iter.blk->rsvp_nxt = rsvp; | ||
678 | break; | ||
679 | } | ||
680 | |||
681 | if (!iter.hw) { | ||
682 | DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id); | ||
683 | return -ENAVAIL; | ||
684 | } | ||
685 | |||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | static int _dpu_rm_reserve_intf( | 602 | static int _dpu_rm_reserve_intf( |
690 | struct dpu_rm *rm, | 603 | struct dpu_rm *rm, |
691 | struct dpu_rm_rsvp *rsvp, | 604 | struct dpu_rm_rsvp *rsvp, |
692 | uint32_t id, | 605 | uint32_t id, |
693 | enum dpu_hw_blk_type type, | 606 | enum dpu_hw_blk_type type) |
694 | bool needs_cdm) | ||
695 | { | 607 | { |
696 | struct dpu_rm_hw_iter iter; | 608 | struct dpu_rm_hw_iter iter; |
697 | int ret = 0; | 609 | int ret = 0; |
@@ -719,9 +631,6 @@ static int _dpu_rm_reserve_intf( | |||
719 | return -EINVAL; | 631 | return -EINVAL; |
720 | } | 632 | } |
721 | 633 | ||
722 | if (needs_cdm) | ||
723 | ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type); | ||
724 | |||
725 | return ret; | 634 | return ret; |
726 | } | 635 | } |
727 | 636 | ||
@@ -738,7 +647,7 @@ static int _dpu_rm_reserve_intf_related_hw( | |||
738 | continue; | 647 | continue; |
739 | id = i + INTF_0; | 648 | id = i + INTF_0; |
740 | ret = _dpu_rm_reserve_intf(rm, rsvp, id, | 649 | ret = _dpu_rm_reserve_intf(rm, rsvp, id, |
741 | DPU_HW_BLK_INTF, hw_res->needs_cdm); | 650 | DPU_HW_BLK_INTF); |
742 | if (ret) | 651 | if (ret) |
743 | return ret; | 652 | return ret; |
744 | } | 653 | } |
@@ -750,17 +659,14 @@ static int _dpu_rm_make_next_rsvp( | |||
750 | struct dpu_rm *rm, | 659 | struct dpu_rm *rm, |
751 | struct drm_encoder *enc, | 660 | struct drm_encoder *enc, |
752 | struct drm_crtc_state *crtc_state, | 661 | struct drm_crtc_state *crtc_state, |
753 | struct drm_connector_state *conn_state, | ||
754 | struct dpu_rm_rsvp *rsvp, | 662 | struct dpu_rm_rsvp *rsvp, |
755 | struct dpu_rm_requirements *reqs) | 663 | struct dpu_rm_requirements *reqs) |
756 | { | 664 | { |
757 | int ret; | 665 | int ret; |
758 | struct dpu_rm_topology_def topology; | ||
759 | 666 | ||
760 | /* Create reservation info, tag reserved blocks with it as we go */ | 667 | /* Create reservation info, tag reserved blocks with it as we go */ |
761 | rsvp->seq = ++rm->rsvp_next_seq; | 668 | rsvp->seq = ++rm->rsvp_next_seq; |
762 | rsvp->enc_id = enc->base.id; | 669 | rsvp->enc_id = enc->base.id; |
763 | rsvp->topology = reqs->topology->top_name; | ||
764 | list_add_tail(&rsvp->list, &rm->rsvps); | 670 | list_add_tail(&rsvp->list, &rm->rsvps); |
765 | 671 | ||
766 | ret = _dpu_rm_reserve_lms(rm, rsvp, reqs); | 672 | ret = _dpu_rm_reserve_lms(rm, rsvp, reqs); |
@@ -769,23 +675,12 @@ static int _dpu_rm_make_next_rsvp( | |||
769 | return ret; | 675 | return ret; |
770 | } | 676 | } |
771 | 677 | ||
772 | /* | 678 | ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology); |
773 | * Do assignment preferring to give away low-resource CTLs first: | ||
774 | * - Check mixers without Split Display | ||
775 | * - Only then allow to grab from CTLs with split display capability | ||
776 | */ | ||
777 | _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology); | ||
778 | if (ret && !reqs->topology->needs_split_display) { | ||
779 | memcpy(&topology, reqs->topology, sizeof(topology)); | ||
780 | topology.needs_split_display = true; | ||
781 | _dpu_rm_reserve_ctls(rm, rsvp, &topology); | ||
782 | } | ||
783 | if (ret) { | 679 | if (ret) { |
784 | DPU_ERROR("unable to find appropriate CTL\n"); | 680 | DPU_ERROR("unable to find appropriate CTL\n"); |
785 | return ret; | 681 | return ret; |
786 | } | 682 | } |
787 | 683 | ||
788 | /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */ | ||
789 | ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res); | 684 | ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res); |
790 | if (ret) | 685 | if (ret) |
791 | return ret; | 686 | return ret; |
@@ -797,44 +692,16 @@ static int _dpu_rm_populate_requirements( | |||
797 | struct dpu_rm *rm, | 692 | struct dpu_rm *rm, |
798 | struct drm_encoder *enc, | 693 | struct drm_encoder *enc, |
799 | struct drm_crtc_state *crtc_state, | 694 | struct drm_crtc_state *crtc_state, |
800 | struct drm_connector_state *conn_state, | ||
801 | struct dpu_rm_requirements *reqs, | 695 | struct dpu_rm_requirements *reqs, |
802 | struct msm_display_topology req_topology) | 696 | struct msm_display_topology req_topology) |
803 | { | 697 | { |
804 | int i; | 698 | dpu_encoder_get_hw_resources(enc, &reqs->hw_res); |
805 | 699 | ||
806 | memset(reqs, 0, sizeof(*reqs)); | 700 | reqs->topology = req_topology; |
807 | 701 | ||
808 | dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state); | 702 | DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n", |
809 | 703 | reqs->topology.num_lm, reqs->topology.num_enc, | |
810 | for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) { | 704 | reqs->topology.num_intf); |
811 | if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], | ||
812 | req_topology)) { | ||
813 | reqs->topology = &g_top_table[i]; | ||
814 | break; | ||
815 | } | ||
816 | } | ||
817 | |||
818 | if (!reqs->topology) { | ||
819 | DPU_ERROR("invalid topology for the display\n"); | ||
820 | return -EINVAL; | ||
821 | } | ||
822 | |||
823 | /** | ||
824 | * Set the requirement based on caps if not set from user space | ||
825 | * This will ensure to select LM tied with DS blocks | ||
826 | * Currently, DS blocks are tied with LM 0 and LM 1 (primary display) | ||
827 | */ | ||
828 | if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler && | ||
829 | conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) | ||
830 | reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS); | ||
831 | |||
832 | DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl, | ||
833 | reqs->hw_res.display_num_of_h_tiles); | ||
834 | DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n", | ||
835 | reqs->topology->num_lm, reqs->topology->num_ctl, | ||
836 | reqs->topology->top_name, | ||
837 | reqs->topology->needs_split_display); | ||
838 | 705 | ||
839 | return 0; | 706 | return 0; |
840 | } | 707 | } |
@@ -860,29 +727,12 @@ static struct dpu_rm_rsvp *_dpu_rm_get_rsvp( | |||
860 | return NULL; | 727 | return NULL; |
861 | } | 728 | } |
862 | 729 | ||
863 | static struct drm_connector *_dpu_rm_get_connector( | ||
864 | struct drm_encoder *enc) | ||
865 | { | ||
866 | struct drm_connector *conn = NULL; | ||
867 | struct list_head *connector_list = | ||
868 | &enc->dev->mode_config.connector_list; | ||
869 | |||
870 | list_for_each_entry(conn, connector_list, head) | ||
871 | if (conn->encoder == enc) | ||
872 | return conn; | ||
873 | |||
874 | return NULL; | ||
875 | } | ||
876 | |||
877 | /** | 730 | /** |
878 | * _dpu_rm_release_rsvp - release resources and release a reservation | 731 | * _dpu_rm_release_rsvp - release resources and release a reservation |
879 | * @rm: KMS handle | 732 | * @rm: KMS handle |
880 | * @rsvp: RSVP pointer to release and release resources for | 733 | * @rsvp: RSVP pointer to release and release resources for |
881 | */ | 734 | */ |
882 | static void _dpu_rm_release_rsvp( | 735 | static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) |
883 | struct dpu_rm *rm, | ||
884 | struct dpu_rm_rsvp *rsvp, | ||
885 | struct drm_connector *conn) | ||
886 | { | 736 | { |
887 | struct dpu_rm_rsvp *rsvp_c, *rsvp_n; | 737 | struct dpu_rm_rsvp *rsvp_c, *rsvp_n; |
888 | struct dpu_rm_hw_blk *blk; | 738 | struct dpu_rm_hw_blk *blk; |
@@ -923,7 +773,6 @@ static void _dpu_rm_release_rsvp( | |||
923 | void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) | 773 | void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) |
924 | { | 774 | { |
925 | struct dpu_rm_rsvp *rsvp; | 775 | struct dpu_rm_rsvp *rsvp; |
926 | struct drm_connector *conn; | ||
927 | 776 | ||
928 | if (!rm || !enc) { | 777 | if (!rm || !enc) { |
929 | DPU_ERROR("invalid params\n"); | 778 | DPU_ERROR("invalid params\n"); |
@@ -938,25 +787,15 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) | |||
938 | goto end; | 787 | goto end; |
939 | } | 788 | } |
940 | 789 | ||
941 | conn = _dpu_rm_get_connector(enc); | 790 | _dpu_rm_release_rsvp(rm, rsvp); |
942 | if (!conn) { | ||
943 | DPU_ERROR("failed to get connector for enc %d\n", enc->base.id); | ||
944 | goto end; | ||
945 | } | ||
946 | |||
947 | _dpu_rm_release_rsvp(rm, rsvp, conn); | ||
948 | end: | 791 | end: |
949 | mutex_unlock(&rm->rm_lock); | 792 | mutex_unlock(&rm->rm_lock); |
950 | } | 793 | } |
951 | 794 | ||
952 | static int _dpu_rm_commit_rsvp( | 795 | static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) |
953 | struct dpu_rm *rm, | ||
954 | struct dpu_rm_rsvp *rsvp, | ||
955 | struct drm_connector_state *conn_state) | ||
956 | { | 796 | { |
957 | struct dpu_rm_hw_blk *blk; | 797 | struct dpu_rm_hw_blk *blk; |
958 | enum dpu_hw_blk_type type; | 798 | enum dpu_hw_blk_type type; |
959 | int ret = 0; | ||
960 | 799 | ||
961 | /* Swap next rsvp to be the active */ | 800 | /* Swap next rsvp to be the active */ |
962 | for (type = 0; type < DPU_HW_BLK_MAX; type++) { | 801 | for (type = 0; type < DPU_HW_BLK_MAX; type++) { |
@@ -967,19 +806,12 @@ static int _dpu_rm_commit_rsvp( | |||
967 | } | 806 | } |
968 | } | 807 | } |
969 | } | 808 | } |
970 | |||
971 | if (!ret) | ||
972 | DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id, | ||
973 | rsvp->topology); | ||
974 | |||
975 | return ret; | ||
976 | } | 809 | } |
977 | 810 | ||
978 | int dpu_rm_reserve( | 811 | int dpu_rm_reserve( |
979 | struct dpu_rm *rm, | 812 | struct dpu_rm *rm, |
980 | struct drm_encoder *enc, | 813 | struct drm_encoder *enc, |
981 | struct drm_crtc_state *crtc_state, | 814 | struct drm_crtc_state *crtc_state, |
982 | struct drm_connector_state *conn_state, | ||
983 | struct msm_display_topology topology, | 815 | struct msm_display_topology topology, |
984 | bool test_only) | 816 | bool test_only) |
985 | { | 817 | { |
@@ -987,25 +819,19 @@ int dpu_rm_reserve( | |||
987 | struct dpu_rm_requirements reqs; | 819 | struct dpu_rm_requirements reqs; |
988 | int ret; | 820 | int ret; |
989 | 821 | ||
990 | if (!rm || !enc || !crtc_state || !conn_state) { | ||
991 | DPU_ERROR("invalid arguments\n"); | ||
992 | return -EINVAL; | ||
993 | } | ||
994 | |||
995 | /* Check if this is just a page-flip */ | 822 | /* Check if this is just a page-flip */ |
996 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) | 823 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) |
997 | return 0; | 824 | return 0; |
998 | 825 | ||
999 | DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n", | 826 | DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n", |
1000 | conn_state->connector->base.id, enc->base.id, | 827 | enc->base.id, crtc_state->crtc->base.id, test_only); |
1001 | crtc_state->crtc->base.id, test_only); | ||
1002 | 828 | ||
1003 | mutex_lock(&rm->rm_lock); | 829 | mutex_lock(&rm->rm_lock); |
1004 | 830 | ||
1005 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN); | 831 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN); |
1006 | 832 | ||
1007 | ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, | 833 | ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, |
1008 | conn_state, &reqs, topology); | 834 | topology); |
1009 | if (ret) { | 835 | if (ret) { |
1010 | DPU_ERROR("failed to populate hw requirements\n"); | 836 | DPU_ERROR("failed to populate hw requirements\n"); |
1011 | goto end; | 837 | goto end; |
@@ -1030,28 +856,15 @@ int dpu_rm_reserve( | |||
1030 | 856 | ||
1031 | rsvp_cur = _dpu_rm_get_rsvp(rm, enc); | 857 | rsvp_cur = _dpu_rm_get_rsvp(rm, enc); |
1032 | 858 | ||
1033 | /* | ||
1034 | * User can request that we clear out any reservation during the | ||
1035 | * atomic_check phase by using this CLEAR bit | ||
1036 | */ | ||
1037 | if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) { | ||
1038 | DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n", | ||
1039 | rsvp_cur->seq, rsvp_cur->enc_id); | ||
1040 | _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector); | ||
1041 | rsvp_cur = NULL; | ||
1042 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR); | ||
1043 | } | ||
1044 | |||
1045 | /* Check the proposed reservation, store it in hw's "next" field */ | 859 | /* Check the proposed reservation, store it in hw's "next" field */ |
1046 | ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state, | 860 | ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs); |
1047 | rsvp_nxt, &reqs); | ||
1048 | 861 | ||
1049 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT); | 862 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT); |
1050 | 863 | ||
1051 | if (ret) { | 864 | if (ret) { |
1052 | DPU_ERROR("failed to reserve hw resources: %d\n", ret); | 865 | DPU_ERROR("failed to reserve hw resources: %d\n", ret); |
1053 | _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector); | 866 | _dpu_rm_release_rsvp(rm, rsvp_nxt); |
1054 | } else if (test_only && !RM_RQ_LOCK(&reqs)) { | 867 | } else if (test_only) { |
1055 | /* | 868 | /* |
1056 | * Normally, if test_only, test the reservation and then undo | 869 | * Normally, if test_only, test the reservation and then undo |
1057 | * However, if the user requests LOCK, then keep the reservation | 870 | * However, if the user requests LOCK, then keep the reservation |
@@ -1059,15 +872,11 @@ int dpu_rm_reserve( | |||
1059 | */ | 872 | */ |
1060 | DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n", | 873 | DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n", |
1061 | rsvp_nxt->seq, rsvp_nxt->enc_id); | 874 | rsvp_nxt->seq, rsvp_nxt->enc_id); |
1062 | _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector); | 875 | _dpu_rm_release_rsvp(rm, rsvp_nxt); |
1063 | } else { | 876 | } else { |
1064 | if (test_only && RM_RQ_LOCK(&reqs)) | 877 | _dpu_rm_release_rsvp(rm, rsvp_cur); |
1065 | DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n", | ||
1066 | rsvp_nxt->seq, rsvp_nxt->enc_id); | ||
1067 | |||
1068 | _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector); | ||
1069 | 878 | ||
1070 | ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state); | 879 | _dpu_rm_commit_rsvp(rm, rsvp_nxt); |
1071 | } | 880 | } |
1072 | 881 | ||
1073 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL); | 882 | _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL); |
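With conn_state dropped and the LOCK/CLEAR overrides removed, dpu_rm_reserve() takes only the resource manager, encoder, CRTC state, topology and the test_only flag, and a test-only reservation is now always released again. A hedged sketch of the two call patterns; the topology values and the dpu_kms/drm_enc variables are assumptions standing in for the real encoder code:

struct msm_display_topology topology = {
	.num_lm = 1,
	.num_enc = 0,
	.num_intf = 1,
};

/* atomic_check path: reserve, verify, then the reservation is discarded */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, topology, true);

/* modeset path: reserve for real and commit the rsvp */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, topology, false);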
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index ffd1841a6067..b8273bd23801 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | |||
@@ -21,39 +21,6 @@ | |||
21 | #include "dpu_hw_top.h" | 21 | #include "dpu_hw_top.h" |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * enum dpu_rm_topology_name - HW resource use case in use by connector | ||
25 | * @DPU_RM_TOPOLOGY_NONE: No topology in use currently | ||
26 | * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB | ||
27 | * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB | ||
28 | * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB | ||
29 | */ | ||
30 | enum dpu_rm_topology_name { | ||
31 | DPU_RM_TOPOLOGY_NONE = 0, | ||
32 | DPU_RM_TOPOLOGY_SINGLEPIPE, | ||
33 | DPU_RM_TOPOLOGY_DUALPIPE, | ||
34 | DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, | ||
35 | DPU_RM_TOPOLOGY_MAX, | ||
36 | }; | ||
37 | |||
38 | /** | ||
39 | * enum dpu_rm_topology_control - HW resource use case in use by connector | ||
40 | * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful | ||
41 | * test, reserve the resources for this display. | ||
42 | * Normal behavior would not impact the reservation | ||
43 | * list during the AtomicTest phase. | ||
44 | * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing, | ||
45 | * release any reservation held by this display. | ||
46 | * Normal behavior would not impact the | ||
47 | * reservation list during the AtomicTest phase. | ||
48 | * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities | ||
49 | */ | ||
50 | enum dpu_rm_topology_control { | ||
51 | DPU_RM_TOPCTL_RESERVE_LOCK, | ||
52 | DPU_RM_TOPCTL_RESERVE_CLEAR, | ||
53 | DPU_RM_TOPCTL_DS, | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * struct dpu_rm - DPU dynamic hardware resource manager | 24 | * struct dpu_rm - DPU dynamic hardware resource manager |
58 | * @dev: device handle for event logging purposes | 25 | * @dev: device handle for event logging purposes |
59 | * @rsvps: list of hardware reservations by each crtc->encoder->connector | 26 | * @rsvps: list of hardware reservations by each crtc->encoder->connector |
@@ -125,7 +92,6 @@ int dpu_rm_destroy(struct dpu_rm *rm); | |||
125 | * @rm: DPU Resource Manager handle | 92 | * @rm: DPU Resource Manager handle |
126 | * @drm_enc: DRM Encoder handle | 93 | * @drm_enc: DRM Encoder handle |
127 | * @crtc_state: Proposed Atomic DRM CRTC State handle | 94 | * @crtc_state: Proposed Atomic DRM CRTC State handle |
128 | * @conn_state: Proposed Atomic DRM Connector State handle | ||
129 | * @topology: Pointer to topology info for the display | 95 | * @topology: Pointer to topology info for the display |
130 | * @test_only: Atomic-Test phase, discard results (unless property overrides) | 96 | * @test_only: Atomic-Test phase, discard results (unless property overrides) |
131 | * @Return: 0 on Success otherwise -ERROR | 97 | * @Return: 0 on Success otherwise -ERROR |
@@ -133,7 +99,6 @@ int dpu_rm_destroy(struct dpu_rm *rm); | |||
133 | int dpu_rm_reserve(struct dpu_rm *rm, | 99 | int dpu_rm_reserve(struct dpu_rm *rm, |
134 | struct drm_encoder *drm_enc, | 100 | struct drm_encoder *drm_enc, |
135 | struct drm_crtc_state *crtc_state, | 101 | struct drm_crtc_state *crtc_state, |
136 | struct drm_connector_state *conn_state, | ||
137 | struct msm_display_topology topology, | 102 | struct msm_display_topology topology, |
138 | bool test_only); | 103 | bool test_only); |
139 | 104 | ||
@@ -187,13 +152,4 @@ bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); | |||
187 | */ | 152 | */ |
188 | int dpu_rm_check_property_topctl(uint64_t val); | 153 | int dpu_rm_check_property_topctl(uint64_t val); |
189 | 154 | ||
190 | /** | ||
191 | * dpu_rm_get_topology_name - returns the name of the the given topology | ||
192 | * definition | ||
193 | * @topology: topology definition | ||
194 | * @Return: name of the topology | ||
195 | */ | ||
196 | enum dpu_rm_topology_name | ||
197 | dpu_rm_get_topology_name(struct msm_display_topology topology); | ||
198 | |||
199 | #endif /* __DPU_RM_H__ */ | 155 | #endif /* __DPU_RM_H__ */ |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h index ae0ca5076238..e12c4cefb742 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | |||
@@ -468,14 +468,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb, | |||
468 | 468 | ||
469 | TRACE_EVENT(dpu_enc_trigger_flush, | 469 | TRACE_EVENT(dpu_enc_trigger_flush, |
470 | TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, | 470 | TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, |
471 | int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret), | 471 | int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits, |
472 | u32 pending_flush_ret), | ||
472 | TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx, | 473 | TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx, |
473 | pending_flush_ret), | 474 | extra_flush_bits, pending_flush_ret), |
474 | TP_STRUCT__entry( | 475 | TP_STRUCT__entry( |
475 | __field( uint32_t, drm_id ) | 476 | __field( uint32_t, drm_id ) |
476 | __field( enum dpu_intf, intf_idx ) | 477 | __field( enum dpu_intf, intf_idx ) |
477 | __field( int, pending_kickoff_cnt ) | 478 | __field( int, pending_kickoff_cnt ) |
478 | __field( int, ctl_idx ) | 479 | __field( int, ctl_idx ) |
480 | __field( u32, extra_flush_bits ) | ||
479 | __field( u32, pending_flush_ret ) | 481 | __field( u32, pending_flush_ret ) |
480 | ), | 482 | ), |
481 | TP_fast_assign( | 483 | TP_fast_assign( |
@@ -483,12 +485,14 @@ TRACE_EVENT(dpu_enc_trigger_flush, | |||
483 | __entry->intf_idx = intf_idx; | 485 | __entry->intf_idx = intf_idx; |
484 | __entry->pending_kickoff_cnt = pending_kickoff_cnt; | 486 | __entry->pending_kickoff_cnt = pending_kickoff_cnt; |
485 | __entry->ctl_idx = ctl_idx; | 487 | __entry->ctl_idx = ctl_idx; |
488 | __entry->extra_flush_bits = extra_flush_bits; | ||
486 | __entry->pending_flush_ret = pending_flush_ret; | 489 | __entry->pending_flush_ret = pending_flush_ret; |
487 | ), | 490 | ), |
488 | TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d " | 491 | TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d " |
489 | "pending_flush_ret=%u", __entry->drm_id, | 492 | "extra_flush_bits=0x%x pending_flush_ret=0x%x", |
490 | __entry->intf_idx, __entry->pending_kickoff_cnt, | 493 | __entry->drm_id, __entry->intf_idx, |
491 | __entry->ctl_idx, __entry->pending_flush_ret) | 494 | __entry->pending_kickoff_cnt, __entry->ctl_idx, |
495 | __entry->extra_flush_bits, __entry->pending_flush_ret) | ||
492 | ); | 496 | ); |
493 | 497 | ||
494 | DECLARE_EVENT_CLASS(dpu_enc_ktime_template, | 498 | DECLARE_EVENT_CLASS(dpu_enc_ktime_template, |
@@ -682,37 +686,41 @@ TRACE_EVENT(dpu_crtc_setup_mixer, | |||
682 | TP_STRUCT__entry( | 686 | TP_STRUCT__entry( |
683 | __field( uint32_t, crtc_id ) | 687 | __field( uint32_t, crtc_id ) |
684 | __field( uint32_t, plane_id ) | 688 | __field( uint32_t, plane_id ) |
685 | __field( struct drm_plane_state*,state ) | 689 | __field( uint32_t, fb_id ) |
686 | __field( struct dpu_plane_state*,pstate ) | 690 | __field_struct( struct drm_rect, src_rect ) |
691 | __field_struct( struct drm_rect, dst_rect ) | ||
687 | __field( uint32_t, stage_idx ) | 692 | __field( uint32_t, stage_idx ) |
693 | __field( enum dpu_stage, stage ) | ||
688 | __field( enum dpu_sspp, sspp ) | 694 | __field( enum dpu_sspp, sspp ) |
695 | __field( uint32_t, multirect_idx ) | ||
696 | __field( uint32_t, multirect_mode ) | ||
689 | __field( uint32_t, pixel_format ) | 697 | __field( uint32_t, pixel_format ) |
690 | __field( uint64_t, modifier ) | 698 | __field( uint64_t, modifier ) |
691 | ), | 699 | ), |
692 | TP_fast_assign( | 700 | TP_fast_assign( |
693 | __entry->crtc_id = crtc_id; | 701 | __entry->crtc_id = crtc_id; |
694 | __entry->plane_id = plane_id; | 702 | __entry->plane_id = plane_id; |
695 | __entry->state = state; | 703 | __entry->fb_id = state ? state->fb->base.id : 0; |
696 | __entry->pstate = pstate; | 704 | __entry->src_rect = drm_plane_state_src(state); |
705 | __entry->dst_rect = drm_plane_state_dest(state); | ||
697 | __entry->stage_idx = stage_idx; | 706 | __entry->stage_idx = stage_idx; |
707 | __entry->stage = pstate->stage; | ||
698 | __entry->sspp = sspp; | 708 | __entry->sspp = sspp; |
709 | __entry->multirect_idx = pstate->multirect_index; | ||
710 | __entry->multirect_mode = pstate->multirect_mode; | ||
699 | __entry->pixel_format = pixel_format; | 711 | __entry->pixel_format = pixel_format; |
700 | __entry->modifier = modifier; | 712 | __entry->modifier = modifier; |
701 | ), | 713 | ), |
702 | TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} " | 714 | TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT |
703 | "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d " | 715 | " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d " |
704 | "multirect_index:%d multirect_mode:%u pix_format:%u " | 716 | "multirect_index:%d multirect_mode:%u pix_format:%u " |
705 | "modifier:%llu", | 717 | "modifier:%llu", |
706 | __entry->crtc_id, __entry->plane_id, | 718 | __entry->crtc_id, __entry->plane_id, __entry->fb_id, |
707 | __entry->state->fb ? __entry->state->fb->base.id : -1, | 719 | DRM_RECT_FP_ARG(&__entry->src_rect), |
708 | __entry->state->src_w >> 16, __entry->state->src_h >> 16, | 720 | DRM_RECT_ARG(&__entry->dst_rect), |
709 | __entry->state->src_x >> 16, __entry->state->src_y >> 16, | 721 | __entry->stage_idx, __entry->stage, __entry->sspp, |
710 | __entry->state->crtc_w, __entry->state->crtc_h, | 722 | __entry->multirect_idx, __entry->multirect_mode, |
711 | __entry->state->crtc_x, __entry->state->crtc_y, | 723 | __entry->pixel_format, __entry->modifier) |
712 | __entry->stage_idx, __entry->pstate->stage, __entry->sspp, | ||
713 | __entry->pstate->multirect_index, | ||
714 | __entry->pstate->multirect_mode, __entry->pixel_format, | ||
715 | __entry->modifier) | ||
716 | ); | 724 | ); |
717 | 725 | ||
718 | TRACE_EVENT(dpu_crtc_setup_lm_bounds, | 726 | TRACE_EVENT(dpu_crtc_setup_lm_bounds, |
@@ -721,15 +729,15 @@ TRACE_EVENT(dpu_crtc_setup_lm_bounds, | |||
721 | TP_STRUCT__entry( | 729 | TP_STRUCT__entry( |
722 | __field( uint32_t, drm_id ) | 730 | __field( uint32_t, drm_id ) |
723 | __field( int, mixer ) | 731 | __field( int, mixer ) |
724 | __field( struct drm_rect *, bounds ) | 732 | __field_struct( struct drm_rect, bounds ) |
725 | ), | 733 | ), |
726 | TP_fast_assign( | 734 | TP_fast_assign( |
727 | __entry->drm_id = drm_id; | 735 | __entry->drm_id = drm_id; |
728 | __entry->mixer = mixer; | 736 | __entry->mixer = mixer; |
729 | __entry->bounds = bounds; | 737 | __entry->bounds = *bounds; |
730 | ), | 738 | ), |
731 | TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id, | 739 | TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id, |
732 | __entry->mixer, DRM_RECT_ARG(__entry->bounds)) | 740 | __entry->mixer, DRM_RECT_ARG(&__entry->bounds)) |
733 | ); | 741 | ); |
734 | 742 | ||
735 | TRACE_EVENT(dpu_crtc_vblank_enable, | 743 | TRACE_EVENT(dpu_crtc_vblank_enable, |
@@ -740,21 +748,25 @@ TRACE_EVENT(dpu_crtc_vblank_enable, | |||
740 | __field( uint32_t, drm_id ) | 748 | __field( uint32_t, drm_id ) |
741 | __field( uint32_t, enc_id ) | 749 | __field( uint32_t, enc_id ) |
742 | __field( bool, enable ) | 750 | __field( bool, enable ) |
743 | __field( struct dpu_crtc *, crtc ) | 751 | __field( bool, enabled ) |
752 | __field( bool, suspend ) | ||
753 | __field( bool, vblank_requested ) | ||
744 | ), | 754 | ), |
745 | TP_fast_assign( | 755 | TP_fast_assign( |
746 | __entry->drm_id = drm_id; | 756 | __entry->drm_id = drm_id; |
747 | __entry->enc_id = enc_id; | 757 | __entry->enc_id = enc_id; |
748 | __entry->enable = enable; | 758 | __entry->enable = enable; |
749 | __entry->crtc = crtc; | 759 | __entry->enabled = crtc->enabled; |
760 | __entry->suspend = crtc->suspend; | ||
761 | __entry->vblank_requested = crtc->vblank_requested; | ||
750 | ), | 762 | ), |
751 | TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s " | 763 | TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s " |
752 | "vblank_req:%s}", | 764 | "vblank_req:%s}", |
753 | __entry->drm_id, __entry->enc_id, | 765 | __entry->drm_id, __entry->enc_id, |
754 | __entry->enable ? "true" : "false", | 766 | __entry->enable ? "true" : "false", |
755 | __entry->crtc->enabled ? "true" : "false", | 767 | __entry->enabled ? "true" : "false", |
756 | __entry->crtc->suspend ? "true" : "false", | 768 | __entry->suspend ? "true" : "false", |
757 | __entry->crtc->vblank_requested ? "true" : "false") | 769 | __entry->vblank_requested ? "true" : "false") |
758 | ); | 770 | ); |
759 | 771 | ||
760 | DECLARE_EVENT_CLASS(dpu_crtc_enable_template, | 772 | DECLARE_EVENT_CLASS(dpu_crtc_enable_template, |
@@ -763,18 +775,22 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template, | |||
763 | TP_STRUCT__entry( | 775 | TP_STRUCT__entry( |
764 | __field( uint32_t, drm_id ) | 776 | __field( uint32_t, drm_id ) |
765 | __field( bool, enable ) | 777 | __field( bool, enable ) |
766 | __field( struct dpu_crtc *, crtc ) | 778 | __field( bool, enabled ) |
779 | __field( bool, suspend ) | ||
780 | __field( bool, vblank_requested ) | ||
767 | ), | 781 | ), |
768 | TP_fast_assign( | 782 | TP_fast_assign( |
769 | __entry->drm_id = drm_id; | 783 | __entry->drm_id = drm_id; |
770 | __entry->enable = enable; | 784 | __entry->enable = enable; |
771 | __entry->crtc = crtc; | 785 | __entry->enabled = crtc->enabled; |
786 | __entry->suspend = crtc->suspend; | ||
787 | __entry->vblank_requested = crtc->vblank_requested; | ||
772 | ), | 788 | ), |
773 | TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}", | 789 | TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}", |
774 | __entry->drm_id, __entry->enable ? "true" : "false", | 790 | __entry->drm_id, __entry->enable ? "true" : "false", |
775 | __entry->crtc->enabled ? "true" : "false", | 791 | __entry->enabled ? "true" : "false", |
776 | __entry->crtc->suspend ? "true" : "false", | 792 | __entry->suspend ? "true" : "false", |
777 | __entry->crtc->vblank_requested ? "true" : "false") | 793 | __entry->vblank_requested ? "true" : "false") |
778 | ); | 794 | ); |
779 | DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend, | 795 | DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend, |
780 | TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), | 796 | TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), |
@@ -814,24 +830,24 @@ TRACE_EVENT(dpu_plane_set_scanout, | |||
814 | TP_ARGS(index, layout, multirect_index), | 830 | TP_ARGS(index, layout, multirect_index), |
815 | TP_STRUCT__entry( | 831 | TP_STRUCT__entry( |
816 | __field( enum dpu_sspp, index ) | 832 | __field( enum dpu_sspp, index ) |
817 | __field( struct dpu_hw_fmt_layout*, layout ) | 833 | __field_struct( struct dpu_hw_fmt_layout, layout ) |
818 | __field( enum dpu_sspp_multirect_index, multirect_index) | 834 | __field( enum dpu_sspp_multirect_index, multirect_index) |
819 | ), | 835 | ), |
820 | TP_fast_assign( | 836 | TP_fast_assign( |
821 | __entry->index = index; | 837 | __entry->index = index; |
822 | __entry->layout = layout; | 838 | __entry->layout = *layout; |
823 | __entry->multirect_index = multirect_index; | 839 | __entry->multirect_index = multirect_index; |
824 | ), | 840 | ), |
825 | TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} " | 841 | TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} " |
826 | "multirect_index:%d", __entry->index, __entry->layout->width, | 842 | "multirect_index:%d", __entry->index, __entry->layout.width, |
827 | __entry->layout->height, __entry->layout->plane_addr[0], | 843 | __entry->layout.height, __entry->layout.plane_addr[0], |
828 | __entry->layout->plane_size[0], | 844 | __entry->layout.plane_size[0], |
829 | __entry->layout->plane_addr[1], | 845 | __entry->layout.plane_addr[1], |
830 | __entry->layout->plane_size[1], | 846 | __entry->layout.plane_size[1], |
831 | __entry->layout->plane_addr[2], | 847 | __entry->layout.plane_addr[2], |
832 | __entry->layout->plane_size[2], | 848 | __entry->layout.plane_size[2], |
833 | __entry->layout->plane_addr[3], | 849 | __entry->layout.plane_addr[3], |
834 | __entry->layout->plane_size[3], __entry->multirect_index) | 850 | __entry->layout.plane_size[3], __entry->multirect_index) |
835 | ); | 851 | ); |
836 | 852 | ||
837 | TRACE_EVENT(dpu_plane_disable, | 853 | TRACE_EVENT(dpu_plane_disable, |
@@ -868,10 +884,6 @@ DECLARE_EVENT_CLASS(dpu_rm_iter_template, | |||
868 | TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type, | 884 | TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type, |
869 | __entry->enc_id) | 885 | __entry->enc_id) |
870 | ); | 886 | ); |
871 | DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm, | ||
872 | TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), | ||
873 | TP_ARGS(id, type, enc_id) | ||
874 | ); | ||
875 | DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf, | 887 | DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf, |
876 | TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), | 888 | TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), |
877 | TP_ARGS(id, type, enc_id) | 889 | TP_ARGS(id, type, enc_id) |
@@ -979,16 +991,16 @@ TRACE_EVENT(dpu_core_perf_update_clk, | |||
979 | TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate), | 991 | TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate), |
980 | TP_ARGS(dev, stop_req, clk_rate), | 992 | TP_ARGS(dev, stop_req, clk_rate), |
981 | TP_STRUCT__entry( | 993 | TP_STRUCT__entry( |
982 | __field( struct drm_device *, dev ) | 994 | __string( dev_name, dev->unique ) |
983 | __field( bool, stop_req ) | 995 | __field( bool, stop_req ) |
984 | __field( u64, clk_rate ) | 996 | __field( u64, clk_rate ) |
985 | ), | 997 | ), |
986 | TP_fast_assign( | 998 | TP_fast_assign( |
987 | __entry->dev = dev; | 999 | __assign_str(dev_name, dev->unique); |
988 | __entry->stop_req = stop_req; | 1000 | __entry->stop_req = stop_req; |
989 | __entry->clk_rate = clk_rate; | 1001 | __entry->clk_rate = clk_rate; |
990 | ), | 1002 | ), |
991 | TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique, | 1003 | TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name), |
992 | __entry->stop_req ? "true" : "false", __entry->clk_rate) | 1004 | __entry->stop_req ? "true" : "false", __entry->clk_rate) |
993 | ); | 1005 | ); |
994 | 1006 | ||
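A pattern repeated across the dpu_trace.h hunks above is replacing raw pointers stored in the trace record with copies (__field_struct, __string/__assign_str): TP_printk runs when the trace buffer is read, long after the event fired, so a stashed pointer may already dangle. The following userspace illustration of that hazard uses made-up record types, not the kernel tracepoint macros.

/*
 * Why the tracepoints copy data into the record instead of stashing
 * pointers: the record is formatted long after the event, when the
 * original object may already be gone.
 */
#include <stdio.h>
#include <stdlib.h>

struct rect { int x, y, w, h; };

struct record_bad  { const struct rect *r; };   /* stores a pointer */
struct record_good { struct rect r; };          /* stores a copy    */

int main(void)
{
	struct rect *live = malloc(sizeof(*live));
	*live = (struct rect){ 0, 0, 1920, 1080 };

	struct record_bad  bad  = { .r = live };
	struct record_good good = { .r = *live };

	free(live);          /* the "event" is over; the object goes away */
	live = NULL;

	/* Formatting happens later, as TP_printk does when the trace is read. */
	printf("good: %dx%d\n", good.r.w, good.r.h);   /* safe: data was copied */
	/* printf("bad: %dx%d\n", bad.r->w, bad.r->h);    use-after-free        */
	(void)bad;
	return 0;
}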
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 7d306c5acd09..7f42c3e68a53 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | |||
@@ -185,7 +185,7 @@ static void mdp5_plane_reset(struct drm_plane *plane) | |||
185 | struct mdp5_plane_state *mdp5_state; | 185 | struct mdp5_plane_state *mdp5_state; |
186 | 186 | ||
187 | if (plane->state && plane->state->fb) | 187 | if (plane->state && plane->state->fb) |
188 | drm_framebuffer_unreference(plane->state->fb); | 188 | drm_framebuffer_put(plane->state->fb); |
189 | 189 | ||
190 | kfree(to_mdp5_plane_state(plane->state)); | 190 | kfree(to_mdp5_plane_state(plane->state)); |
191 | mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); | 191 | mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); |
@@ -228,7 +228,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane, | |||
228 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); | 228 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); |
229 | 229 | ||
230 | if (state->fb) | 230 | if (state->fb) |
231 | drm_framebuffer_unreference(state->fb); | 231 | drm_framebuffer_put(state->fb); |
232 | 232 | ||
233 | kfree(pstate); | 233 | kfree(pstate); |
234 | } | 234 | } |
@@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, | |||
259 | msm_framebuffer_cleanup(fb, kms->aspace); | 259 | msm_framebuffer_cleanup(fb, kms->aspace); |
260 | } | 260 | } |
261 | 261 | ||
262 | #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) | ||
263 | static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, | 262 | static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, |
264 | struct drm_plane_state *state) | 263 | struct drm_plane_state *state) |
265 | { | 264 | { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ff8164cc6738..a9768f823290 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c | |||
@@ -83,6 +83,7 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev) | |||
83 | return ERR_PTR(-ENOMEM); | 83 | return ERR_PTR(-ENOMEM); |
84 | DBG("dsi probed=%p", msm_dsi); | 84 | DBG("dsi probed=%p", msm_dsi); |
85 | 85 | ||
86 | msm_dsi->id = -1; | ||
86 | msm_dsi->pdev = pdev; | 87 | msm_dsi->pdev = pdev; |
87 | platform_set_drvdata(pdev, msm_dsi); | 88 | platform_set_drvdata(pdev, msm_dsi); |
88 | 89 | ||
@@ -117,8 +118,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) | |||
117 | 118 | ||
118 | DBG(""); | 119 | DBG(""); |
119 | msm_dsi = dsi_init(pdev); | 120 | msm_dsi = dsi_init(pdev); |
120 | if (IS_ERR(msm_dsi)) | 121 | if (IS_ERR(msm_dsi)) { |
121 | return PTR_ERR(msm_dsi); | 122 | /* Don't fail the bind if the dsi port is not connected */ |
123 | if (PTR_ERR(msm_dsi) == -ENODEV) | ||
124 | return 0; | ||
125 | else | ||
126 | return PTR_ERR(msm_dsi); | ||
127 | } | ||
122 | 128 | ||
123 | priv->dsi[msm_dsi->id] = msm_dsi; | 129 | priv->dsi[msm_dsi->id] = msm_dsi; |
124 | 130 | ||
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 96fb5f635314..9c6c523eacdc 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
@@ -1750,6 +1750,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) | |||
1750 | if (ret) { | 1750 | if (ret) { |
1751 | dev_err(dev, "%s: invalid lane configuration %d\n", | 1751 | dev_err(dev, "%s: invalid lane configuration %d\n", |
1752 | __func__, ret); | 1752 | __func__, ret); |
1753 | ret = -EINVAL; | ||
1753 | goto err; | 1754 | goto err; |
1754 | } | 1755 | } |
1755 | 1756 | ||
@@ -1757,6 +1758,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) | |||
1757 | device_node = of_graph_get_remote_node(np, 1, 0); | 1758 | device_node = of_graph_get_remote_node(np, 1, 0); |
1758 | if (!device_node) { | 1759 | if (!device_node) { |
1759 | dev_dbg(dev, "%s: no valid device\n", __func__); | 1760 | dev_dbg(dev, "%s: no valid device\n", __func__); |
1761 | ret = -ENODEV; | ||
1760 | goto err; | 1762 | goto err; |
1761 | } | 1763 | } |
1762 | 1764 | ||
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 5224010d90e4..80aa6344185e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c | |||
@@ -839,6 +839,8 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi) | |||
839 | 839 | ||
840 | if (msm_dsi->host) | 840 | if (msm_dsi->host) |
841 | msm_dsi_host_unregister(msm_dsi->host); | 841 | msm_dsi_host_unregister(msm_dsi->host); |
842 | msm_dsim->dsi[msm_dsi->id] = NULL; | 842 | |
843 | if (msm_dsi->id >= 0) | ||
844 | msm_dsim->dsi[msm_dsi->id] = NULL; | ||
843 | } | 845 | } |
844 | 846 | ||
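The dsi.c, dsi_host.c and dsi_manager.c hunks above make a disconnected DSI port non-fatal: id starts at -1, -ENODEV from init is swallowed at bind time, and unregister only clears the lookup slot when an id was actually assigned. Here is a minimal standalone sketch of that idea; the types and functions are stand-ins, not the driver's.

#include <stdio.h>
#include <errno.h>

#define MAX_DSI 2

struct fake_dsi { int id; };
static struct fake_dsi *table[MAX_DSI];

static int fake_init(struct fake_dsi *dsi, int connected)
{
	dsi->id = -1;                    /* not registered yet */
	if (!connected)
		return -ENODEV;          /* port exists but nothing attached */
	dsi->id = 0;
	table[dsi->id] = dsi;
	return 0;
}

static int fake_bind(struct fake_dsi *dsi, int connected)
{
	int ret = fake_init(dsi, connected);

	/* Don't fail the bind if the dsi port is not connected. */
	if (ret)
		return ret == -ENODEV ? 0 : ret;
	return 0;
}

static void fake_unregister(struct fake_dsi *dsi)
{
	if (dsi->id >= 0)                /* guard against the never-bound case */
		table[dsi->id] = NULL;
}

int main(void)
{
	struct fake_dsi dsi;

	printf("bind (disconnected): %d\n", fake_bind(&dsi, 0));
	fake_unregister(&dsi);           /* safe: id is still -1 */

	printf("bind (connected): %d\n", fake_bind(&dsi, 1));
	fake_unregister(&dsi);
	return 0;
}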
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c1abad8a8612..4904d0d41409 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -337,7 +337,7 @@ static int msm_drm_uninit(struct device *dev) | |||
337 | mdss->funcs->destroy(ddev); | 337 | mdss->funcs->destroy(ddev); |
338 | 338 | ||
339 | ddev->dev_private = NULL; | 339 | ddev->dev_private = NULL; |
340 | drm_dev_unref(ddev); | 340 | drm_dev_put(ddev); |
341 | 341 | ||
342 | kfree(priv); | 342 | kfree(priv); |
343 | 343 | ||
@@ -452,7 +452,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) | |||
452 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 452 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
453 | if (!priv) { | 453 | if (!priv) { |
454 | ret = -ENOMEM; | 454 | ret = -ENOMEM; |
455 | goto err_unref_drm_dev; | 455 | goto err_put_drm_dev; |
456 | } | 456 | } |
457 | 457 | ||
458 | ddev->dev_private = priv; | 458 | ddev->dev_private = priv; |
@@ -653,8 +653,8 @@ err_destroy_mdss: | |||
653 | mdss->funcs->destroy(ddev); | 653 | mdss->funcs->destroy(ddev); |
654 | err_free_priv: | 654 | err_free_priv: |
655 | kfree(priv); | 655 | kfree(priv); |
656 | err_unref_drm_dev: | 656 | err_put_drm_dev: |
657 | drm_dev_unref(ddev); | 657 | drm_dev_put(ddev); |
658 | return ret; | 658 | return ret; |
659 | } | 659 | } |
660 | 660 | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8e510d5c758a..9d11f321f5a9 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -62,6 +62,8 @@ struct msm_gem_vma; | |||
62 | #define MAX_BRIDGES 8 | 62 | #define MAX_BRIDGES 8 |
63 | #define MAX_CONNECTORS 8 | 63 | #define MAX_CONNECTORS 8 |
64 | 64 | ||
65 | #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) | ||
66 | |||
65 | struct msm_file_private { | 67 | struct msm_file_private { |
66 | rwlock_t queuelock; | 68 | rwlock_t queuelock; |
67 | struct list_head submitqueues; | 69 | struct list_head submitqueues; |
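FRAC_16_16(), which the hunks above promote from mdp5_plane.c into msm_drv.h, builds a 16.16 fixed-point ratio: the integer part lands in the upper 16 bits and the fraction in the lower 16. A quick worked example follows; the printed decimal interpretation is just for illustration.

#include <stdio.h>
#include <stdint.h>

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

int main(void)
{
	uint32_t half  = FRAC_16_16(1, 2);   /* 0x00008000 -> 0.5 */
	uint32_t three = FRAC_16_16(3, 1);   /* 0x00030000 -> 3.0 */

	printf("1/2 -> 0x%08x (%f)\n", half,  half  / 65536.0);
	printf("3/1 -> 0x%08x (%f)\n", three, three / 65536.0);
	return 0;
}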
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 7bd83e0afa97..7a7923e6220d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -144,7 +144,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
144 | goto out_unlock; | 144 | goto out_unlock; |
145 | } | 145 | } |
146 | 146 | ||
147 | drm_gem_object_reference(obj); | 147 | drm_gem_object_get(obj); |
148 | 148 | ||
149 | submit->bos[i].obj = msm_obj; | 149 | submit->bos[i].obj = msm_obj; |
150 | 150 | ||
@@ -396,7 +396,7 @@ static void submit_cleanup(struct msm_gem_submit *submit) | |||
396 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 396 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
397 | submit_unlock_unpin_bo(submit, i, false); | 397 | submit_unlock_unpin_bo(submit, i, false); |
398 | list_del_init(&msm_obj->submit_entry); | 398 | list_del_init(&msm_obj->submit_entry); |
399 | drm_gem_object_unreference(&msm_obj->base); | 399 | drm_gem_object_put(&msm_obj->base); |
400 | } | 400 | } |
401 | 401 | ||
402 | ww_acquire_fini(&submit->ticket); | 402 | ww_acquire_fini(&submit->ticket); |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5e808cfec345..11aac8337066 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -41,7 +41,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, | |||
41 | if (IS_ERR(opp)) | 41 | if (IS_ERR(opp)) |
42 | return PTR_ERR(opp); | 42 | return PTR_ERR(opp); |
43 | 43 | ||
44 | clk_set_rate(gpu->core_clk, *freq); | 44 | if (gpu->funcs->gpu_set_freq) |
45 | gpu->funcs->gpu_set_freq(gpu, (u64)*freq); | ||
46 | else | ||
47 | clk_set_rate(gpu->core_clk, *freq); | ||
48 | |||
45 | dev_pm_opp_put(opp); | 49 | dev_pm_opp_put(opp); |
46 | 50 | ||
47 | return 0; | 51 | return 0; |
@@ -51,16 +55,14 @@ static int msm_devfreq_get_dev_status(struct device *dev, | |||
51 | struct devfreq_dev_status *status) | 55 | struct devfreq_dev_status *status) |
52 | { | 56 | { |
53 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); | 57 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); |
54 | u64 cycles; | ||
55 | u32 freq = ((u32) status->current_frequency) / 1000000; | ||
56 | ktime_t time; | 58 | ktime_t time; |
57 | 59 | ||
58 | status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk); | 60 | if (gpu->funcs->gpu_get_freq) |
59 | gpu->funcs->gpu_busy(gpu, &cycles); | 61 | status->current_frequency = gpu->funcs->gpu_get_freq(gpu); |
60 | 62 | else | |
61 | status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq; | 63 | status->current_frequency = clk_get_rate(gpu->core_clk); |
62 | 64 | ||
63 | gpu->devfreq.busy_cycles = cycles; | 65 | status->busy_time = gpu->funcs->gpu_busy(gpu); |
64 | 66 | ||
65 | time = ktime_get(); | 67 | time = ktime_get(); |
66 | status->total_time = ktime_us_delta(time, gpu->devfreq.time); | 68 | status->total_time = ktime_us_delta(time, gpu->devfreq.time); |
@@ -73,7 +75,10 @@ static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) | |||
73 | { | 75 | { |
74 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); | 76 | struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); |
75 | 77 | ||
76 | *freq = (unsigned long) clk_get_rate(gpu->core_clk); | 78 | if (gpu->funcs->gpu_get_freq) |
79 | *freq = gpu->funcs->gpu_get_freq(gpu); | ||
80 | else | ||
81 | *freq = clk_get_rate(gpu->core_clk); | ||
77 | 82 | ||
78 | return 0; | 83 | return 0; |
79 | } | 84 | } |
@@ -88,7 +93,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = { | |||
88 | static void msm_devfreq_init(struct msm_gpu *gpu) | 93 | static void msm_devfreq_init(struct msm_gpu *gpu) |
89 | { | 94 | { |
90 | /* We need target support to do devfreq */ | 95 | /* We need target support to do devfreq */ |
91 | if (!gpu->funcs->gpu_busy || !gpu->core_clk) | 96 | if (!gpu->funcs->gpu_busy) |
92 | return; | 97 | return; |
93 | 98 | ||
94 | msm_devfreq_profile.initial_freq = gpu->fast_rate; | 99 | msm_devfreq_profile.initial_freq = gpu->fast_rate; |
@@ -105,6 +110,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu) | |||
105 | dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); | 110 | dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); |
106 | gpu->devfreq.devfreq = NULL; | 111 | gpu->devfreq.devfreq = NULL; |
107 | } | 112 | } |
113 | |||
114 | devfreq_suspend_device(gpu->devfreq.devfreq); | ||
108 | } | 115 | } |
109 | 116 | ||
110 | static int enable_pwrrail(struct msm_gpu *gpu) | 117 | static int enable_pwrrail(struct msm_gpu *gpu) |
@@ -184,6 +191,14 @@ static int disable_axi(struct msm_gpu *gpu) | |||
184 | return 0; | 191 | return 0; |
185 | } | 192 | } |
186 | 193 | ||
194 | void msm_gpu_resume_devfreq(struct msm_gpu *gpu) | ||
195 | { | ||
196 | gpu->devfreq.busy_cycles = 0; | ||
197 | gpu->devfreq.time = ktime_get(); | ||
198 | |||
199 | devfreq_resume_device(gpu->devfreq.devfreq); | ||
200 | } | ||
201 | |||
187 | int msm_gpu_pm_resume(struct msm_gpu *gpu) | 202 | int msm_gpu_pm_resume(struct msm_gpu *gpu) |
188 | { | 203 | { |
189 | int ret; | 204 | int ret; |
@@ -202,12 +217,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) | |||
202 | if (ret) | 217 | if (ret) |
203 | return ret; | 218 | return ret; |
204 | 219 | ||
205 | if (gpu->devfreq.devfreq) { | 220 | msm_gpu_resume_devfreq(gpu); |
206 | gpu->devfreq.busy_cycles = 0; | ||
207 | gpu->devfreq.time = ktime_get(); | ||
208 | |||
209 | devfreq_resume_device(gpu->devfreq.devfreq); | ||
210 | } | ||
211 | 221 | ||
212 | gpu->needs_hw_init = true; | 222 | gpu->needs_hw_init = true; |
213 | 223 | ||
@@ -220,8 +230,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) | |||
220 | 230 | ||
221 | DBG("%s", gpu->name); | 231 | DBG("%s", gpu->name); |
222 | 232 | ||
223 | if (gpu->devfreq.devfreq) | 233 | devfreq_suspend_device(gpu->devfreq.devfreq); |
224 | devfreq_suspend_device(gpu->devfreq.devfreq); | ||
225 | 234 | ||
226 | ret = disable_axi(gpu); | 235 | ret = disable_axi(gpu); |
227 | if (ret) | 236 | if (ret) |
@@ -367,8 +376,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, | |||
367 | msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); | 376 | msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); |
368 | } | 377 | } |
369 | #else | 378 | #else |
370 | static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm, | 379 | static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, |
371 | char *cmd) | 380 | struct msm_gem_submit *submit, char *comm, char *cmd) |
372 | { | 381 | { |
373 | } | 382 | } |
374 | #endif | 383 | #endif |
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 9122ee6e55e4..f82bac086666 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h | |||
@@ -70,9 +70,11 @@ struct msm_gpu_funcs { | |||
70 | /* for generation specific debugfs: */ | 70 | /* for generation specific debugfs: */ |
71 | int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); | 71 | int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); |
72 | #endif | 72 | #endif |
73 | int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value); | 73 | unsigned long (*gpu_busy)(struct msm_gpu *gpu); |
74 | struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu); | 74 | struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu); |
75 | int (*gpu_state_put)(struct msm_gpu_state *state); | 75 | int (*gpu_state_put)(struct msm_gpu_state *state); |
76 | unsigned long (*gpu_get_freq)(struct msm_gpu *gpu); | ||
77 | void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq); | ||
76 | }; | 78 | }; |
77 | 79 | ||
78 | struct msm_gpu { | 80 | struct msm_gpu { |
@@ -264,6 +266,7 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) | |||
264 | 266 | ||
265 | int msm_gpu_pm_suspend(struct msm_gpu *gpu); | 267 | int msm_gpu_pm_suspend(struct msm_gpu *gpu); |
266 | int msm_gpu_pm_resume(struct msm_gpu *gpu); | 268 | int msm_gpu_pm_resume(struct msm_gpu *gpu); |
269 | void msm_gpu_resume_devfreq(struct msm_gpu *gpu); | ||
267 | 270 | ||
268 | int msm_gpu_hw_init(struct msm_gpu *gpu); | 271 | int msm_gpu_hw_init(struct msm_gpu *gpu); |
269 | 272 | ||
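The msm_gpu.c and msm_gpu.h hunks above let a backend own its clock: devfreq calls the new gpu_set_freq/gpu_get_freq hooks when they are provided and falls back to clk_set_rate()/clk_get_rate() otherwise, while gpu_busy now returns the busy time directly. The following stubbed sketch shows only the dispatch pattern; all structs and names here are stand-ins, not the driver's.

#include <stdio.h>

struct fake_gpu;

struct fake_gpu_funcs {
	void (*gpu_set_freq)(struct fake_gpu *gpu, unsigned long freq);
	unsigned long (*gpu_get_freq)(struct fake_gpu *gpu);
};

struct fake_gpu {
	const struct fake_gpu_funcs *funcs;
	unsigned long core_clk_rate;     /* stands in for clk_get/set_rate() */
};

static void devfreq_target(struct fake_gpu *gpu, unsigned long freq)
{
	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, freq);
	else
		gpu->core_clk_rate = freq;   /* clk_set_rate() fallback */
}

static unsigned long devfreq_get_cur_freq(struct fake_gpu *gpu)
{
	if (gpu->funcs->gpu_get_freq)
		return gpu->funcs->gpu_get_freq(gpu);
	return gpu->core_clk_rate;           /* clk_get_rate() fallback */
}

/* A backend that manages its own clock, in the spirit of a6xx. */
static unsigned long backend_rate;
static void backend_set(struct fake_gpu *gpu, unsigned long f) { (void)gpu; backend_rate = f; }
static unsigned long backend_get(struct fake_gpu *gpu)         { (void)gpu; return backend_rate; }

int main(void)
{
	struct fake_gpu legacy = { .funcs = &(struct fake_gpu_funcs){ 0 } };
	struct fake_gpu hooked = { .funcs = &(struct fake_gpu_funcs){ backend_set, backend_get } };

	devfreq_target(&legacy, 200000000UL);
	devfreq_target(&hooked, 710000000UL);
	printf("legacy: %lu, hooked: %lu\n",
	       devfreq_get_cur_freq(&legacy), devfreq_get_cur_freq(&hooked));
	return 0;
}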
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 3aa8a8576abe..cca933458439 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
@@ -366,7 +366,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | |||
366 | va_list args; | 366 | va_list args; |
367 | 367 | ||
368 | va_start(args, fmt); | 368 | va_start(args, fmt); |
369 | n = vsnprintf(msg, sizeof(msg), fmt, args); | 369 | n = vscnprintf(msg, sizeof(msg), fmt, args); |
370 | va_end(args); | 370 | va_end(args); |
371 | 371 | ||
372 | rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); | 372 | rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); |
@@ -375,11 +375,11 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | |||
375 | rcu_read_lock(); | 375 | rcu_read_lock(); |
376 | task = pid_task(submit->pid, PIDTYPE_PID); | 376 | task = pid_task(submit->pid, PIDTYPE_PID); |
377 | if (task) { | 377 | if (task) { |
378 | n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u", | 378 | n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u", |
379 | TASK_COMM_LEN, task->comm, | 379 | TASK_COMM_LEN, task->comm, |
380 | pid_nr(submit->pid), submit->seqno); | 380 | pid_nr(submit->pid), submit->seqno); |
381 | } else { | 381 | } else { |
382 | n = snprintf(msg, sizeof(msg), "???/%d: fence=%u", | 382 | n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u", |
383 | pid_nr(submit->pid), submit->seqno); | 383 | pid_nr(submit->pid), submit->seqno); |
384 | } | 384 | } |
385 | rcu_read_unlock(); | 385 | rcu_read_unlock(); |
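The msm_rd.c hunk above swaps vsnprintf()/snprintf() for their "scn" variants because the plain versions return the length the string would have had, which can exceed the buffer, while vscnprintf()/scnprintf() return the number of bytes actually stored, so n is safe to pass on to rd_write_section(). A userspace illustration follows; the scn() helper is a stand-in for the kernel's scnprintf(), not its real implementation.

#include <stdio.h>
#include <string.h>

static int scn(char *buf, size_t size, const char *fmt, const char *arg)
{
	int n = snprintf(buf, size, fmt, arg);

	/* clamp to what actually fits, like scnprintf() does */
	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char msg[16];
	const char *comm = "very_long_task_name_here";

	int raw     = snprintf(msg, sizeof(msg), "%s: fence=1", comm);
	int clamped = scn(msg, sizeof(msg), "%s: fence=1", comm);

	/* raw may exceed sizeof(msg); using it as a write length would overrun */
	printf("raw=%d clamped=%d stored=%zu\n", raw, clamped, strlen(msg));
	return 0;
}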