path: root/drivers/gpu/drm/msm
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
commit		988adfdffdd43cfd841df734664727993076d7cb (patch)
tree		6794f7bba8f595500c2b7d33376ad6614adcfaf2 /drivers/gpu/drm/msm
parent		26178ec11ef3c6c814bf16a0a2b9c2f7242e3c64 (diff)
parent		4e0cd68115620bc3236ff4e58e4c073948629b41 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

  - AMD KFD driver merge

    This is the AMD HSA interface for exposing a lowlevel interface for
    GPGPU use.  They have an open source userspace built on top of this
    interface, and the code looks as good as it was going to get out of
    tree.

  - Initial atomic modesetting work

    The need for an atomic modesetting interface to allow userspace to
    try and send a complete set of modesetting state to the driver has
    arisen, and been suffering from neglect this past year.  No more,
    the start of the common code and changes for msm driver to use it
    are in this tree.  Ongoing work to get the userspace ioctl finished
    and the code clean will probably wait until next kernel.

  - DisplayID 1.3 and tiled monitor exposed to userspace.

    Tiled monitor property is now exposed for userspace to make use of.

  - Rockchip drm driver merged.

  - imx gpu driver moved out of staging

  Other stuff:

  - core:
      panel - MIPI DSI + new panels.
      expose suggested x/y properties for virtual GPUs

  - i915:
      Initial Skylake (SKL) support
      gen3/4 reset work
      start of dri1/ums removal
      infoframe tracking
      fixes for lots of things.

  - nouveau:
      tegra k1 voltage support
      GM204 modesetting support
      GT21x memory reclocking work

  - radeon:
      CI dpm fixes
      GPUVM improvements
      Initial DPM fan control

  - rcar-du:
      HDMI support added
      removed some support for old boards
      slave encoder driver for Analog Devices adv7511

  - exynos:
      Exynos4415 SoC support

  - msm:
      a4xx gpu support
      atomic helper conversion

  - tegra:
      iommu support
      universal plane support
      ganged-mode DSI support

  - sti:
      HDMI i2c improvements

  - vmwgfx:
      some late fixes.

  - qxl:
      use suggested x/y properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (969 commits)
  drm: sti: fix module compilation issue
  drm/i915: save/restore GMBUS freq across suspend/resume on gen4
  drm: sti: correctly cleanup CRTC and planes
  drm: sti: add HQVDP plane
  drm: sti: add cursor plane
  drm: sti: enable auxiliary CRTC
  drm: sti: fix delay in VTG programming
  drm: sti: prepare sti_tvout to support auxiliary crtc
  drm: sti: use drm_crtc_vblank_{on/off} instead of drm_vblank_{on/off}
  drm: sti: fix hdmi avi infoframe
  drm: sti: remove event lock while disabling vblank
  drm: sti: simplify gdp code
  drm: sti: clear all mixer control
  drm: sti: remove gpio for HDMI hot plug detection
  drm: sti: allow to change hdmi ddc i2c adapter
  drm/doc: Document drm_add_modes_noedid() usage
  drm/i915: Remove '& 0xffff' from the mask given to WA_REG()
  drm/i915: Invert the mask and val arguments in wa_add() and WA_REG()
  drm: Zero out DRM object memory upon cleanup
  drm/i915/bdw: Fix the write setting up the WIZ hashing mode
  ...
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig                         |    1
-rw-r--r--  drivers/gpu/drm/msm/Makefile                        |    4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h               |   26
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h               |  247
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c               |   91
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h               | 2144
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c               |  604
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.h               |   34
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h      |   17
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c          |   13
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c             |   31
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h             |  126
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h         |   75
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h                   |    8
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h               |    8
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h                  |    8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c                     |  144
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h                     |   17
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h                 |    8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c              |    3
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c           |    7
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c            |    2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h               |    8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h             |    8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c            |  348
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c             |   17
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h             |   17
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c  |    3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c           |  121
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h             |   10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c             |  207
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h             |   91
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c            |  466
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c             |  322
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h             |  122
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c         |   24
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c             |   93
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c             |  273
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h             |  131
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c           |  328
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c             |  241
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h             |   23
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c                    |  163
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                       |   25
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                       |   35
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c                        |   45
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c                     |    3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c                       |   40
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h                       |   13
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c                 |   13
50 files changed, 5578 insertions(+), 1230 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 9d907c526c94..5b2a1ff95d3d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,6 +3,7 @@ config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select SHMEM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 6283dcb96af5..143d988f8add 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
 	adreno/adreno_device.o \
 	adreno/adreno_gpu.o \
 	adreno/a3xx_gpu.o \
+	adreno/a4xx_gpu.o \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -24,12 +25,15 @@ msm-y := \
 	mdp/mdp4/mdp4_irq.o \
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o \
+	mdp/mdp5/mdp5_cfg.o \
+	mdp/mdp5/mdp5_ctl.o \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
+	msm_atomic.o \
 	msm_drv.o \
 	msm_fb.o \
 	msm_gem.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index a3104598c27f..22882cc0a573 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (   364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (  1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A2XX_VGT_DRAW_INITIATOR_NOT_EOP				0x00001000
 #define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX			0x00002000
 #define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE	0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK		0xffff0000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT		16
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK		0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT		24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-	return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+	return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A2XX_VGT_IMMED_DATA				0x000021fd
@@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
 #define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT			0
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
 }
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK			0xffff0000
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT			16
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SU_POINT_MINMAX				0x00002281
@@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 #define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT			0
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A2XX_PA_SU_POINT_MINMAX_MAX__MASK			0xffff0000
 #define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT			16
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A2XX_PA_SU_LINE_CNTL				0x00002282
@@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 #define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT			0
 static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SC_LINE_STIPPLE				0x00002283
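
Most of the churn in a2xx.xml.h is the regenerated fixed-point encoders moving from a x8 to a x16 scale, which reads as point and line sizes now being packed as unsigned 12.4 fixed point in a 16-bit field (an inference from the mask width and the new scale factor, not something the patch states). A standalone sketch of what the regenerated helper computes:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors A2XX_PA_SU_POINT_SIZE_HEIGHT() after this patch:
     * unsigned fixed point with 4 fractional bits, in bits 15:0 */
    #define POINT_SIZE_HEIGHT__MASK 0x0000ffff

    static uint32_t encode_point_height(float val)
    {
    	return ((uint32_t)(val * 16.0)) & POINT_SIZE_HEIGHT__MASK;
    }

    int main(void)
    {
    	printf("%#x\n", encode_point_height(2.5f)); /* 2.5 * 16 = 40 -> 0x28 */
    	return 0;
    }
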
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 82d015279b47..109e9a263daf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (   364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (  1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
 	VFMT_NORM_USHORT_16_16 = 29,
 	VFMT_NORM_USHORT_16_16_16 = 30,
 	VFMT_NORM_USHORT_16_16_16_16 = 31,
+	VFMT_UINT_32 = 32,
+	VFMT_UINT_32_32 = 33,
+	VFMT_UINT_32_32_32 = 34,
+	VFMT_UINT_32_32_32_32 = 35,
+	VFMT_INT_32 = 36,
+	VFMT_INT_32_32 = 37,
+	VFMT_INT_32_32_32 = 38,
+	VFMT_INT_32_32_32_32 = 39,
 	VFMT_UBYTE_8 = 40,
 	VFMT_UBYTE_8_8 = 41,
 	VFMT_UBYTE_8_8_8 = 42,
@@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
 	TFMT_NORM_USHORT_565 = 4,
 	TFMT_NORM_USHORT_5551 = 6,
 	TFMT_NORM_USHORT_4444 = 7,
+	TFMT_NORM_USHORT_Z16 = 9,
 	TFMT_NORM_UINT_X8Z24 = 10,
+	TFMT_FLOAT_Z32 = 11,
 	TFMT_NORM_UINT_NV12_UV_TILED = 17,
 	TFMT_NORM_UINT_NV12_Y_TILED = 19,
 	TFMT_NORM_UINT_NV12_UV = 21,
@@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
 	TFMT_NORM_UINT_I420_U = 26,
 	TFMT_NORM_UINT_I420_V = 27,
 	TFMT_NORM_UINT_2_10_10_10 = 41,
+	TFMT_FLOAT_9_9_9_E5 = 42,
+	TFMT_FLOAT_10_11_11 = 43,
 	TFMT_NORM_UINT_A8 = 44,
 	TFMT_NORM_UINT_L8_A8 = 47,
 	TFMT_NORM_UINT_8 = 48,
 	TFMT_NORM_UINT_8_8 = 49,
 	TFMT_NORM_UINT_8_8_8 = 50,
 	TFMT_NORM_UINT_8_8_8_8 = 51,
+	TFMT_NORM_SINT_8_8 = 53,
+	TFMT_NORM_SINT_8_8_8_8 = 55,
+	TFMT_UINT_8_8 = 57,
+	TFMT_UINT_8_8_8_8 = 59,
+	TFMT_SINT_8_8 = 61,
+	TFMT_SINT_8_8_8_8 = 63,
 	TFMT_FLOAT_16 = 64,
 	TFMT_FLOAT_16_16 = 65,
 	TFMT_FLOAT_16_16_16_16 = 67,
+	TFMT_UINT_16 = 68,
+	TFMT_UINT_16_16 = 69,
+	TFMT_UINT_16_16_16_16 = 71,
+	TFMT_SINT_16 = 72,
+	TFMT_SINT_16_16 = 73,
+	TFMT_SINT_16_16_16_16 = 75,
 	TFMT_FLOAT_32 = 84,
 	TFMT_FLOAT_32_32 = 85,
 	TFMT_FLOAT_32_32_32_32 = 87,
+	TFMT_UINT_32 = 88,
+	TFMT_UINT_32_32 = 89,
+	TFMT_UINT_32_32_32_32 = 91,
+	TFMT_SINT_32 = 92,
+	TFMT_SINT_32_32 = 93,
+	TFMT_SINT_32_32_32_32 = 95,
 };
 
 enum a3xx_tex_fetchsize {
@@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
 };
 
 enum a3xx_color_fmt {
+	RB_R5G6B5_UNORM = 0,
+	RB_R5G5B5A1_UNORM = 1,
+	RB_R4G4B4A4_UNORM = 3,
 	RB_R8G8B8_UNORM = 4,
 	RB_R8G8B8A8_UNORM = 8,
-	RB_Z16_UNORM = 12,
+	RB_R8G8B8A8_UINT = 10,
+	RB_R8G8B8A8_SINT = 11,
+	RB_R8G8_UNORM = 12,
+	RB_R8_UINT = 14,
+	RB_R8_SINT = 15,
+	RB_R10G10B10A2_UNORM = 16,
 	RB_A8_UNORM = 20,
+	RB_R8_UNORM = 21,
 	RB_R16G16B16A16_FLOAT = 27,
+	RB_R11G11B10_FLOAT = 28,
+	RB_R16_SINT = 40,
+	RB_R16G16_SINT = 41,
+	RB_R16G16B16A16_SINT = 43,
+	RB_R16_UINT = 44,
+	RB_R16G16_UINT = 45,
+	RB_R16G16B16A16_UINT = 47,
 	RB_R32G32B32A32_FLOAT = 51,
-};
-
-enum a3xx_color_swap {
-	WZYX = 0,
-	WXYZ = 1,
-	ZYXW = 2,
-	XYZW = 3,
+	RB_R32_SINT = 52,
+	RB_R32G32_SINT = 53,
+	RB_R32G32B32A32_SINT = 55,
+	RB_R32_UINT = 56,
+	RB_R32G32_UINT = 57,
+	RB_R32G32B32A32_UINT = 59,
 };
 
 enum a3xx_sp_perfcounter_select {
@@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
 	BLEND_MAX_DST_SRC = 4,
 };
 
+enum a3xx_intp_mode {
+	SMOOTH = 0,
+	FLAT = 1,
+};
+
 enum a3xx_tex_filter {
 	A3XX_TEX_NEAREST = 0,
 	A3XX_TEX_LINEAR = 1,
@@ -536,6 +586,10 @@ enum a3xx_tex_type {
 
 #define REG_A3XX_CP_MEQ_DATA					0x000001db
 
+#define REG_A3XX_CP_WFI_PEND_CTR				0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2				0x0000039d
+
 #define REG_A3XX_CP_PERFCOUNTER_SELECT				0x00000445
 
 #define REG_A3XX_CP_HW_FAULT					0x0000045c
@@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
 
 #define REG_A3XX_CP_AHB_FAULT					0x0000054d
 
+#define REG_A3XX_SQ_GPR_MANAGEMENT				0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT			0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN					0x00000e1e
+
 #define REG_A3XX_SP_GLOBAL_MEM_SIZE				0x00000e22
 
 #define REG_A3XX_SP_GLOBAL_MEM_ADDR				0x00000e23
@@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
 #define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT			0
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+	return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK			0xffff0000
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT			16
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+	return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POINT_SIZE				0x00002069
@@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 #define A3XX_GRAS_SU_POINT_SIZE__SHIFT				0
 static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+	return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE			0x0000206c
@@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT		0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 {
-	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+	return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET			0x0000206d
@@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT			0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 {
-	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+	return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_MODE_CONTROL				0x00002070
@@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 #define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT		3
 static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
 {
-	return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+	return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
 }
 #define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET			0x00000800
 
@@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
 {
 	return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB				0x00004000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0xfffe0000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT		17
 static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_UNK12				0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK			0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT			14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 #define REG_A3XX_RB_DEPTH_CLEAR					0x00002101
 
 #define REG_A3XX_RB_DEPTH_INFO					0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK			0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK			0x00000003
 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT			0
 static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
 {
@@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
 {
 	return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
 }
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART			0x00100000
 #define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST		0x02000000
 #define A3XX_PC_PRIM_VTX_CNTL_PSIZE				0x04000000
 
@@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART			0x00000200
 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2			0x00000400
 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE			0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE			0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK			0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT		27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
 #define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE		0x10000000
 #define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE		0x20000000
 #define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE			0x40000000
@@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
 
 #define REG_A3XX_VFD_INDEX_OFFSET				0x00002245
 
+#define REG_A3XX_VFD_INDEX_OFFSET				0x00002245
+
 static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
 
 static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
 {
 	return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
 }
+#define A3XX_VFD_DECODE_INSTR_INT				0x00100000
 #define A3XX_VFD_DECODE_INSTR_SWAP__MASK			0x00c00000
 #define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT			22
 static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK			0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT			0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK			0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT			2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK			0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT			4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK			0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT			6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK			0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT			8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK			0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT			10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK			0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT			12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK			0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT			14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK			0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT			16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK			0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT			18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK			0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT			20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK			0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT			22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK			0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT			24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK			0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT			26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK			0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT			28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK			0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT			30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
 
 static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
 
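The sixteen two-bit C0-CF fields added above pack one interpolation mode per varying component into a single 32-bit VPC_VARYING_INTERP_MODE word. A hedged sketch of how a driver could assemble such a word; build_interp_word() is a hypothetical helper, not part of this patch:

    #include <stdint.h>

    enum a3xx_intp_mode { SMOOTH = 0, FLAT = 1 };

    /* component i occupies bits [2*i+1 : 2*i], matching the C0..CF masks */
    static uint32_t build_interp_word(const enum a3xx_intp_mode modes[16])
    {
    	uint32_t word = 0;
    	int i;

    	for (i = 0; i < 16; i++)
    		word |= ((uint32_t)modes[i] & 0x3) << (2 * i);
    	return word;
    }
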
@@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
 	return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
 }
 #define A3XX_SP_FS_MRT_REG_HALF_PRECISION			0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT					0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT					0x00000800
 
 static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
 
@@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
 	return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
 }
 
+#define REG_A3XX_PA_SC_AA_CONFIG				0x00002301
+
 #define REG_A3XX_TPL1_TP_VS_TEX_OFFSET				0x00002340
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK		0x000000ff
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT		0
@@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A3XX_VGT_DRAW_INITIATOR_NOT_EOP				0x00001000
 #define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX			0x00002000
 #define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE	0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK		0xffff0000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT		16
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK		0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT		24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-	return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+	return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A3XX_VGT_IMMED_DATA					0x000021fd
@@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS				0x80000000
 
 #define REG_A3XX_TEX_SAMP_1					0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK				0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT				0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
 #define A3XX_TEX_SAMP_1_MAX_LOD__MASK				0x003ff000
 #define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT				12
 static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
 {
-	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+	return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
 }
 #define A3XX_TEX_SAMP_1_MIN_LOD__MASK				0xffc00000
 #define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT				22
 static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 {
-	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+	return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
 }
 
 #define REG_A3XX_TEX_CONST_0					0x00000000
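Note the asymmetry in the new TEX_SAMP_1 encoders: LOD_BIAS casts through int32_t before masking, so a negative bias lands in the low 11 bits as two's complement (a signed field at scale 64, i.e. 6 fractional bits), while MIN/MAX_LOD stay unsigned. A small standalone illustration of why the signed cast matters (values chosen for the example, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define LOD_BIAS__MASK 0x000007ff

    static uint32_t encode_lod_bias(float val)
    {
    	/* the int32_t cast keeps the sign; the mask then truncates to an
    	 * 11-bit two's-complement field: -1.0 * 64 = -64 -> 0x7c0 */
    	return ((uint32_t)((int32_t)(val * 64.0))) & LOD_BIAS__MASK;
    }

    int main(void)
    {
    	printf("%#x %#x\n", encode_lod_bias(1.0f), encode_lod_bias(-1.0f));
    	return 0; /* prints 0x40 0x7c0 */
    }
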
@@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
 }
 
 #define REG_A3XX_TEX_CONST_3					0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x0000000f
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT			0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+	return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK				0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT				17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+	return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK				0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT			28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+	return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
 
 
 #endif /* A3XX_XML */
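The LAYERSZ encoders shift the incoming byte count right by 12 before packing, so the per-layer size is apparently stored in 4096-byte units (an inference from the >>12, not stated anywhere in the patch); how the LAYERSZ1 and LAYERSZ2 fields combine is left to the driver. A minimal sketch of the encode step:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors A3XX_TEX_CONST_3_LAYERSZ1(): layer size expressed in
     * 4 KiB units, truncated to the 4-bit field at bits 3:0 */
    static uint32_t tex_const_3_layersz1(uint32_t bytes)
    {
    	return (bytes >> 12) & 0x0000000f;
    }

    int main(void)
    {
    	printf("%u\n", tex_const_3_layersz1(16384)); /* 16 KiB -> 4 */
    	return 0;
    }
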
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 218c5b060398..b66c53bdc039 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
 		gpu_read(gpu, REG_A3XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
+/* Register offset defines for A3XX */
+static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+			REG_A3XX_CP_PFP_UCODE_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+			REG_A3XX_CP_PFP_UCODE_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+			REG_A3XX_CP_PROTECT_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+			REG_A3XX_RBBM_PERFCTR_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+			REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+			REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+			REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+			REG_A3XX_RBBM_INT_0_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+			REG_A3XX_RBBM_AHB_ERROR_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+			REG_A3XX_RBBM_INT_CLEAR_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+			REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
+	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+			REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
+	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+			REG_A3XX_VSC_SIZE_ADDRESS),
+	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
+	REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+			REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+			REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+			REG_A3XX_SP_VS_OBJ_START_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+			REG_A3XX_SP_FS_OBJ_START_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
+			REG_A3XX_RBBM_PM_OVERRIDE2),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
+	REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
+			REG_A3XX_SQ_GPR_MANAGEMENT),
+	REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+			REG_A3XX_SQ_INST_STORE_MANAGMENT),
+	REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+			REG_A3XX_RBBM_SW_RESET_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+			REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
 
 static const struct adreno_gpu_funcs funcs = {
 	.base = {
@@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
 	adreno_gpu->registers = a3xx_registers;
+	adreno_gpu->reg_offsets = a3xx_register_offsets;
 
 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
 	if (ret)
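The point of a3xx_register_offsets is to let generation-independent code in the adreno core refer to a register by an abstract REG_ADRENO_* name and have each GPU family supply its concrete offset, which is also what lets the new a4xx backend below reuse the same common code. A hedged sketch of the lookup pattern, assuming REG_ADRENO_DEFINE() fills an abstract-index -> concrete-offset table; the helper is illustrative only, leans on the driver's existing struct msm_gpu and gpu_read(), and is not copied from the patch:

    /* illustrative only: translate an abstract register id through the
     * per-generation offset table before touching the hardware */
    static inline uint32_t adreno_gpu_read_sketch(struct msm_gpu *gpu,
    		const unsigned int *reg_offsets, unsigned int abstract_reg)
    {
    	return gpu_read(gpu, reg_offsets[abstract_reg]);
    }
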
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 000000000000..5a24c416d2dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,2144 @@
1#ifndef A4XX_XML
2#define A4XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
18
19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark)
21
22Permission is hereby granted, free of charge, to any person obtaining
23a copy of this software and associated documentation files (the
24"Software"), to deal in the Software without restriction, including
25without limitation the rights to use, copy, modify, merge, publish,
26distribute, sublicense, and/or sell copies of the Software, and to
27permit persons to whom the Software is furnished to do so, subject to
28the following conditions:
29
30The above copyright notice and this permission notice (including the
31next paragraph) shall be included in all copies or substantial
32portions of the Software.
33
34THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
38LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
39OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41*/
42
43
44enum a4xx_color_fmt {
45 RB4_A8_UNORM = 1,
46 RB4_R5G6R5_UNORM = 14,
47 RB4_Z16_UNORM = 15,
48 RB4_R8G8B8_UNORM = 25,
49 RB4_R8G8B8A8_UNORM = 26,
50};
51
52enum a4xx_tile_mode {
53 TILE4_LINEAR = 0,
54 TILE4_3 = 3,
55};
56
57enum a4xx_rb_blend_opcode {
58 BLEND_DST_PLUS_SRC = 0,
59 BLEND_SRC_MINUS_DST = 1,
60 BLEND_DST_MINUS_SRC = 2,
61 BLEND_MIN_DST_SRC = 3,
62 BLEND_MAX_DST_SRC = 4,
63};
64
65enum a4xx_vtx_fmt {
66 VFMT4_FLOAT_32 = 1,
67 VFMT4_FLOAT_32_32 = 2,
68 VFMT4_FLOAT_32_32_32 = 3,
69 VFMT4_FLOAT_32_32_32_32 = 4,
70 VFMT4_FLOAT_16 = 5,
71 VFMT4_FLOAT_16_16 = 6,
72 VFMT4_FLOAT_16_16_16 = 7,
73 VFMT4_FLOAT_16_16_16_16 = 8,
74 VFMT4_FIXED_32 = 9,
75 VFMT4_FIXED_32_32 = 10,
76 VFMT4_FIXED_32_32_32 = 11,
77 VFMT4_FIXED_32_32_32_32 = 12,
78 VFMT4_SHORT_16 = 16,
79 VFMT4_SHORT_16_16 = 17,
80 VFMT4_SHORT_16_16_16 = 18,
81 VFMT4_SHORT_16_16_16_16 = 19,
82 VFMT4_USHORT_16 = 20,
83 VFMT4_USHORT_16_16 = 21,
84 VFMT4_USHORT_16_16_16 = 22,
85 VFMT4_USHORT_16_16_16_16 = 23,
86 VFMT4_NORM_SHORT_16 = 24,
87 VFMT4_NORM_SHORT_16_16 = 25,
88 VFMT4_NORM_SHORT_16_16_16 = 26,
89 VFMT4_NORM_SHORT_16_16_16_16 = 27,
90 VFMT4_NORM_USHORT_16 = 28,
91 VFMT4_NORM_USHORT_16_16 = 29,
92 VFMT4_NORM_USHORT_16_16_16 = 30,
93 VFMT4_NORM_USHORT_16_16_16_16 = 31,
94 VFMT4_UBYTE_8 = 40,
95 VFMT4_UBYTE_8_8 = 41,
96 VFMT4_UBYTE_8_8_8 = 42,
97 VFMT4_UBYTE_8_8_8_8 = 43,
98 VFMT4_NORM_UBYTE_8 = 44,
99 VFMT4_NORM_UBYTE_8_8 = 45,
100 VFMT4_NORM_UBYTE_8_8_8 = 46,
101 VFMT4_NORM_UBYTE_8_8_8_8 = 47,
102 VFMT4_BYTE_8 = 48,
103 VFMT4_BYTE_8_8 = 49,
104 VFMT4_BYTE_8_8_8 = 50,
105 VFMT4_BYTE_8_8_8_8 = 51,
106 VFMT4_NORM_BYTE_8 = 52,
107 VFMT4_NORM_BYTE_8_8 = 53,
108 VFMT4_NORM_BYTE_8_8_8 = 54,
109 VFMT4_NORM_BYTE_8_8_8_8 = 55,
110 VFMT4_UINT_10_10_10_2 = 60,
111 VFMT4_NORM_UINT_10_10_10_2 = 61,
112 VFMT4_INT_10_10_10_2 = 62,
113 VFMT4_NORM_INT_10_10_10_2 = 63,
114};
115
116enum a4xx_tex_fmt {
117 TFMT4_NORM_USHORT_565 = 11,
118 TFMT4_NORM_USHORT_5551 = 10,
119 TFMT4_NORM_USHORT_4444 = 8,
120 TFMT4_NORM_UINT_X8Z24 = 71,
121 TFMT4_NORM_UINT_2_10_10_10 = 33,
122 TFMT4_NORM_UINT_A8 = 3,
123 TFMT4_NORM_UINT_L8_A8 = 13,
124 TFMT4_NORM_UINT_8 = 4,
125 TFMT4_NORM_UINT_8_8_8_8 = 28,
126 TFMT4_FLOAT_16 = 20,
127 TFMT4_FLOAT_16_16 = 40,
128 TFMT4_FLOAT_16_16_16_16 = 53,
129 TFMT4_FLOAT_32 = 43,
130 TFMT4_FLOAT_32_32 = 56,
131 TFMT4_FLOAT_32_32_32_32 = 63,
132};
133
134enum a4xx_depth_format {
135 DEPTH4_NONE = 0,
136 DEPTH4_16 = 1,
137 DEPTH4_24_8 = 2,
138};
139
140enum a4xx_tex_filter {
141 A4XX_TEX_NEAREST = 0,
142 A4XX_TEX_LINEAR = 1,
143};
144
145enum a4xx_tex_clamp {
146 A4XX_TEX_REPEAT = 0,
147 A4XX_TEX_CLAMP_TO_EDGE = 1,
148 A4XX_TEX_MIRROR_REPEAT = 2,
149 A4XX_TEX_CLAMP_NONE = 3,
150};
151
152enum a4xx_tex_swiz {
153 A4XX_TEX_X = 0,
154 A4XX_TEX_Y = 1,
155 A4XX_TEX_Z = 2,
156 A4XX_TEX_W = 3,
157 A4XX_TEX_ZERO = 4,
158 A4XX_TEX_ONE = 5,
159};
160
161enum a4xx_tex_type {
162 A4XX_TEX_1D = 0,
163 A4XX_TEX_2D = 1,
164 A4XX_TEX_CUBE = 2,
165 A4XX_TEX_3D = 3,
166};
167
168#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
169#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
170static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
171{
172 return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
173}
174#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
175#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
176#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
177#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
178#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
179#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
180#define A4XX_INT0_VFD_ERROR 0x00000040
181#define A4XX_INT0_CP_SW_INT 0x00000080
182#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
183#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
184#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
185#define A4XX_INT0_CP_HW_FAULT 0x00000800
186#define A4XX_INT0_CP_DMA 0x00001000
187#define A4XX_INT0_CP_IB2_INT 0x00002000
188#define A4XX_INT0_CP_IB1_INT 0x00004000
189#define A4XX_INT0_CP_RB_INT 0x00008000
190#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
191#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
192#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
193#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
194#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
195#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
196#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
197#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
198#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
199
200#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
201
202#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
203
204#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
205
206#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
207
208#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
209
210#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
211
212#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
213
214#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
215
216#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
217
218#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
219#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
220#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
221static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
222{
223 return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
224}
225#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
226#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
227static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
228{
229 return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
230}
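/* Width and height pack side by side into one register; a minimal sketch for
 * a 1920x1080 framebuffer (both fields are 14 bits of raw pixel count):
 *
 *   uint32_t dim = A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(1920) |
 *                  A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(1080);
 */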
231
232#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
233
234#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
235
236#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
237
238#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
239
240#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
241#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
242#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
243static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
244{
245 return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
246}
247#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
248#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
249static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
250{
251 return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
252}
253
254#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
255#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
256#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
257
258#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
259#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
260#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
261#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
262static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
263{
264 return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
265}
266
267#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3
268#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
269#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7
270static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
271{
272 return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
273}
274#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000
275
276static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
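/* Per-render-target state is an array of register blocks: target i0 occupies
 * five consecutive dwords starting at 0x20a4, and the REG_A4XX_RB_MRT_*()
 * helpers below index into block i0. A sketch of programming every target
 * (nr_targets and mrt_control[] are illustrative caller state):
 *
 *   for (i = 0; i < nr_targets; i++)
 *           gpu_write(gpu, REG_A4XX_RB_MRT_CONTROL(i), mrt_control[i]);
 */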
277
278static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
279#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
280#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
281#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
282#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
283#define A4XX_RB_MRT_CONTROL_B11 0x00000800
284#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
285#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
286static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
287{
288 return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
289}
290
291static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
292#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
293#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
294static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
295{
296 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
297}
298#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
299#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
300static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
301{
302 return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
303}
304#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
305#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
306static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
307{
308 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
309}
310#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
311#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
312static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
313{
314 return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
315}
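/* Note the pre-shift inside this packer: the caller passes the pitch in
 * bytes, but the field stores it in 16-byte units (val >> 4), so low bits of
 * an unaligned pitch are discarded. Sketch:
 *
 *   uint32_t info = A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(256);
 *                   stores (256 >> 4) == 16, placed at bit 14
 */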
316
317static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
318
319static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
320#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
321#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
322static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
323{
324 return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
325}
326
327static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
328#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
329#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
330static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
331{
332 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
333}
334#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
335#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
336static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
337{
338 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
339}
340#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
341#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
342static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
343{
344 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
345}
346#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
347#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
348static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
349{
350 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
351}
352#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
353#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
354static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
355{
356 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
357}
358#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
359#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
360static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
361{
362 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
363}
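/* Sketch of a complete blend-control word for additive blending. FACTOR_ONE
 * and BLEND_DST_PLUS_SRC follow the naming of the adreno enum headers and
 * are used illustratively here:
 *
 *   uint32_t blend =
 *       A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_ONE) |
 *       A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
 *       A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE) |
 *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_ONE) |
 *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
 *       A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE);
 */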
364
365#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
366#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
367#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
368#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
369static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
370{
371 return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
372}
373
374#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
375#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001
376#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
377#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
378#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
379static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
380{
381 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
382}
383
384#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb
385#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f
386#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0
387static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val)
388{
389 return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK;
390}
391
392#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
393#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
394#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
395static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
396{
397 return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
398}
399#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
400#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
401static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
402{
403 return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
404}
405#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
406#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
407static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
408{
409 return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
410}
411#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
412#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
413static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
414{
415 return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
416}
417
418#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
419#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
420#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
421static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
422{
423 return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
424}
425
426#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
427#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
428#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
429static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
430{
431 return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
432}
433
434#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
435#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
436#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
437static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
438{
439 return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
440}
441#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
442#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
443static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
444{
445 return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
446}
447#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
448#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
449static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
450{
451 return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
452}
453#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
454#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
455static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
456{
457 return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
458}
459#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
460#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
461static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
462{
463 return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
464}
465#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
466#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
467static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
468{
469 return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
470}
471
472#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
473#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001
474#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
475
476#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
477#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
478#define A4XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
479#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
480#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
481#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
482static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
483{
484 return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
485}
486#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
487#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
488#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
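/* Sketch of a typical depth setup (test and write enabled, less-or-equal
 * compare); FUNC_LEQUAL is the adreno_compare_func enumerator, shown here
 * illustratively:
 *
 *   uint32_t zctl = A4XX_RB_DEPTH_CONTROL_Z_ENABLE |
 *                   A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE |
 *                   A4XX_RB_DEPTH_CONTROL_ZFUNC(FUNC_LEQUAL) |
 *                   A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE;
 */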
489
490#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
491
492#define REG_A4XX_RB_DEPTH_INFO 0x00002103
493#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
494#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
495static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
496{
497 return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
498}
499#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
500#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
501static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
502{
503 return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
504}
505
506#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
507#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
508#define A4XX_RB_DEPTH_PITCH__SHIFT 0
509static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
510{
511 return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
512}
513
514#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
515#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
516#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
517static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
518{
519 return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
520}
521
522#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
523#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
524#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
525#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
526#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
527#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
528static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
529{
530 return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
531}
532#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
533#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
534static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
535{
536 return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
537}
538#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
539#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
540static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
541{
542 return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
543}
544#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
545#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
546static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
547{
548 return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
549}
550#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
551#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
552static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
553{
554 return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
555}
556#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
557#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
558static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
559{
560 return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
561}
562#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
563#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
564static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
565{
566 return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
567}
568#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
569#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
570static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
571{
572 return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
573}
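/* Front-face and back-face (_BF) stencil state share this register, the
 * back-face fields occupying the high half. Sketch of a pass-through setup;
 * FUNC_ALWAYS / STENCIL_KEEP name adreno_common enumerators and are
 * illustrative:
 *
 *   uint32_t sctl = A4XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
 *                   A4XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
 *                   A4XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
 *                   A4XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
 *                   A4XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
 *                   A4XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
 *                   A4XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
 *                   A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP);
 */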
574
575#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
576#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
577
578#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
579#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
580#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
581static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
582{
583 return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
584}
585#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
586#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
587static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
588{
589 return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
590}
591#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
592#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
593static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
594{
595 return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
596}
597
598#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
599#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
600#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
601static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
602{
603 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
604}
605#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
606#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
607static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
608{
609 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
610}
611#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
612#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
613static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
614{
615 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
616}
617
618#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
619#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
620#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
621#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
622static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
623{
624 return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
625}
626#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
627#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
628static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
629{
630 return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
631}
632
633#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f
634
635#define REG_A4XX_RBBM_HW_VERSION 0x00000000
636
637#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
638
639static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
640
641static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
642
643static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
644
645static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
646
647static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
648
649static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
650
651static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
652
653static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
654
655#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
656
657#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
658
659#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
660
661#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
662
663#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
664
665#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
666
667#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
668
669#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
670
671#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
672
673#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
674
675#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
676
677#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
678
679#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
680
681#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
682
683#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
684
685#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
686
687#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
688
689#define REG_A4XX_RBBM_AHB_CMD 0x00000025
690
691#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
692
693#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
694
695#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
696
697#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
698
699#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
700
701#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
702
703#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
704
705#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
706
707#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
708
709#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
710
711#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
712
713#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
714
715#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
716
717#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
718
719#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
720
721#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
722
723#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
724
725#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
726
727#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
728
729static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
730
731static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
732
733static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
734
735static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
736
737static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
738
739static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
740
741static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
742
743static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
744
745static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
746
747static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
748
749static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
750
751static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
752
753static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
754
755static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
756
757static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
758
759static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
760
761#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
762
763#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
764
765#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
766
767#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
768
769#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
770
771#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
772
773static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
774
775static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
776
777#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
778
779#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
780
781#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
782
783#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
784
785#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
786
787#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
788
789#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
790
791#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
792
793#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
794
795#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
796
797#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
798
799#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
800
801#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
802
803#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
804
805#define REG_A4XX_RBBM_STATUS 0x00000191
806#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
807#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
808#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
809#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
810#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
811#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
812#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
813#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
814#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
815#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
816#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
817#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
818#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
819#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
820#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
821#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
822#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
823#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
824#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
825#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
826#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
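/* RBBM_STATUS aggregates per-block busy bits, with GPU_BUSY (bit 31) as the
 * catch-all. A sketch of an idle check using the msm driver's gpu_read()
 * helper (retry/timeout policy is the caller's concern):
 *
 *   bool idle = !(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
 *                 A4XX_RBBM_STATUS_GPU_BUSY);
 */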
827
828#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
829
830#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
831
832#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
833
834#define REG_A4XX_CP_RB_BASE 0x00000200
835
836#define REG_A4XX_CP_RB_CNTL 0x00000201
837
838#define REG_A4XX_CP_RB_WPTR 0x00000205
839
840#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
841
842#define REG_A4XX_CP_RB_RPTR 0x00000204
843
844#define REG_A4XX_CP_IB1_BASE 0x00000206
845
846#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
847
848#define REG_A4XX_CP_IB2_BASE 0x00000208
849
850#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
851
852#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
853
854#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
855
856#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
857
858#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
859
860#define REG_A4XX_CP_ROQ_DATA 0x0000021d
861
862#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
863
864#define REG_A4XX_CP_MEQ_DATA 0x0000021f
865
866#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
867
868#define REG_A4XX_CP_MERCIU_DATA 0x00000221
869
870#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
871
872#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
873
874#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
875
876#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
877
878#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
879
880#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
881
882#define REG_A4XX_CP_PREEMPT 0x0000022a
883
884#define REG_A4XX_CP_CNTL 0x0000022c
885
886#define REG_A4XX_CP_ME_CNTL 0x0000022d
887
888#define REG_A4XX_CP_DEBUG 0x0000022e
889
890#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
891
892#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
893
894#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
895
896static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
897
898static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
899
900#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
901
902#define REG_A4XX_CP_ST_BASE 0x000004c0
903
904#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
905
906#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
907
908#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
909
910#define REG_A4XX_CP_HW_FAULT 0x000004d8
911
912#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
913
914#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
915
916#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
917
918#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
919
920static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
921
922static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
923
924#define REG_A4XX_SP_VS_STATUS 0x00000ec0
925
926#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
927
928#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
929#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
930
931#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
932
933#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
934#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
935#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
936static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
937{
938 return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
939}
940#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
941#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
942#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
943#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
944static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
945{
946 return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
947}
948#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
949#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
950static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
951{
952 return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
953}
954#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
955#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
956static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
957{
958 return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
959}
960#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
961#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
962static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
963{
964 return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
965}
966#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
967#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
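/* Sketch of a plausible (not authoritative) CTRL_REG0 value for a vertex
 * shader using two half registers and four full registers; MULTI and
 * FOUR_QUADS name a3xx_threadmode/a3xx_threadsize enumerators from the a3xx
 * header and are illustrative:
 *
 *   uint32_t reg0 = A4XX_SP_VS_CTRL_REG0_THREADMODE(MULTI) |
 *                   A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(2) |
 *                   A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(4) |
 *                   A4XX_SP_VS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
 *                   A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE;
 */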
968
969#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
970#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
971#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
972static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
973{
974 return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
975}
976#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
977#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
978static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
979{
980 return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
981}
982
983#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
984#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
985#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
986static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
987{
988 return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
989}
990#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
991#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
992static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
993{
994 return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
995}
996#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
997#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
998static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
999{
1000 return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
1001}
1002
1003static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1004
1005static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1006#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
1007#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
1008static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
1009{
1010 return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
1011}
1012#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1013#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
1014static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
1015{
1016 return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
1017}
1018#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
1019#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
1020static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
1021{
1022 return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
1023}
1024#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1025#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
1026static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
1027{
1028 return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
1029}
1030
1031static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
1032
1033static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
1034#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1035#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
1036static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
1037{
1038 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
1039}
1040#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1041#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
1042static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
1043{
1044 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
1045}
1046#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1047#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
1048static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
1049{
1050 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
1051}
1052#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1053#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
1054static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
1055{
1056 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
1057}
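/* Each VPC_DST register packs four consecutive varying output locations, one
 * per byte; a sketch placing locations 8..11 in a single dword:
 *
 *   uint32_t dst = A4XX_SP_VS_VPC_DST_REG_OUTLOC0(8)  |
 *                  A4XX_SP_VS_VPC_DST_REG_OUTLOC1(9)  |
 *                  A4XX_SP_VS_VPC_DST_REG_OUTLOC2(10) |
 *                  A4XX_SP_VS_VPC_DST_REG_OUTLOC3(11);
 */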
1058
1059#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
1060#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1061#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1062static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1063{
1064 return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1065}
1066#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1067#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1068static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1069{
1070 return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1071}
1072
1073#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
1074
1075#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
1076
1077#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
1078
1079#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
1080
1081#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
1082#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
1083#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
1084static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1085{
1086 return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
1087}
1088#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
1089#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
1090#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1091#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1092static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1093{
1094 return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1095}
1096#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1097#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1098static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1099{
1100 return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1101}
1102#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1103#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1104static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1105{
1106 return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1107}
1108#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1109#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
1110static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1111{
1112 return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
1113}
1114#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1115#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1116
1117#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
1118#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
1119#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1120static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1121{
1122 return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
1123}
1124#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
1125
1126#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
1127#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1128#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1129static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1130{
1131 return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1132}
1133#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1134#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1135static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1136{
1137 return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1138}
1139
1140#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
1141
1142#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
1143
1144#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
1145
1146#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
1147
1148#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
1149#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
1150#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
1151#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
1152static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
1153{
1154 return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
1155}
1156
1157static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
1158
1159static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
1160#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
1161#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
1162static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
1163{
1164 return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
1165}
1166#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
1167#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
1168#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
1169static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
1170{
1171 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
1172}
1173
1174#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
1175#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1176#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1177static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1178{
1179 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1180}
1181#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1182#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1183static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1184{
1185 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1186}
1187
1188#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
1189#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1190#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1191static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1192{
1193 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1194}
1195#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1196#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1197static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1198{
1199 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1200}
1201
1202#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
1203#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1204#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1205static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1206{
1207 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1208}
1209#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1210#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1211static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1212{
1213 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1214}
1215
1216#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
1217
1218#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
1219
1220#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
1221
1222#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
1223
1224#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
1225
1226#define REG_A4XX_VPC_ATTR 0x00002140
1227#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
1228#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
1229static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
1230{
1231 return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
1232}
1233#define A4XX_VPC_ATTR_PSIZE 0x00000200
1234#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
1235#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1236static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
1237{
1238 return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
1239}
1240#define A4XX_VPC_ATTR_ENABLE 0x02000000
1241
1242#define REG_A4XX_VPC_PACK 0x00002141
1243#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
1244#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
1245static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
1246{
1247 return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
1248}
1249#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
1250#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
1251static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
1252{
1253 return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
1254}
1255#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
1256#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
1257static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
1258{
1259 return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
1260}
1261
1262static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
1263
1264static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
1265
1266static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
1267
1268static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
1269
1270#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
1271
1272#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
1273#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1274#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1275static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1276{
1277 return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
1278}
1279#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
1280#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
1281static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1282{
1283 return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
1284}
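/* Bin dimensions are stored in units of 32 pixels (the packers pre-shift by
 * 5), so callers pass pixel sizes that are multiples of 32. Sketch:
 *
 *   uint32_t bin = A4XX_VSC_BIN_SIZE_WIDTH(224) |     stores 224 >> 5 == 7
 *                  A4XX_VSC_BIN_SIZE_HEIGHT(128);     stores 128 >> 5 == 4
 */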
1285
1286#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
1287
1288#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
1289
1290#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
1291
1292static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
1293
1294static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
1295#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
1296#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
1297static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
1298{
1299 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
1300}
1301#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
1302#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
1303static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
1304{
1305 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
1306}
1307#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
1308#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
1309static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
1310{
1311 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
1312}
1313#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
1314#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
1315static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
1316{
1317 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
1318}
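/* A visibility-stream pipe covers a W x H rectangle of bins whose top-left
 * bin is (X, Y). Sketch of pointing pipe p at an 8x4 region at the origin:
 *
 *   gpu_write(gpu, REG_A4XX_VSC_PIPE_CONFIG_REG(p),
 *             A4XX_VSC_PIPE_CONFIG_REG_X(0) | A4XX_VSC_PIPE_CONFIG_REG_Y(0) |
 *             A4XX_VSC_PIPE_CONFIG_REG_W(8) | A4XX_VSC_PIPE_CONFIG_REG_H(4));
 */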
1319
1320static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
1321
1322static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
1323
1324static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
1325
1326static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
1327
1328#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
1329
1330#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
1331
1332#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
1333
1334#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
1335
1336#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
1337
1338#define REG_A4XX_VFD_CONTROL_0 0x00002200
1339#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
1340#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
1341static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
1342{
1343 return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
1344}
1345#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
1346#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
1347static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
1348{
1349 return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
1350}
1351#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
1352#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
1353static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
1354{
1355 return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
1356}
1357#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
1358#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
1359static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
1360{
1361 return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
1362}
1363
1364#define REG_A4XX_VFD_CONTROL_1 0x00002201
1365#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
1366#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
1367static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
1368{
1369 return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
1370}
1371#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
1372#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
1373static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
1374{
1375 return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
1376}
1377#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
1378#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
1379static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
1380{
1381 return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
1382}
1383
1384#define REG_A4XX_VFD_CONTROL_2 0x00002202
1385
1386#define REG_A4XX_VFD_CONTROL_3 0x00002203
1387
1388#define REG_A4XX_VFD_CONTROL_4 0x00002204
1389
1390#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
1391
1392static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
1393
1394static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
1395#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
1396#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
1397static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
1398{
1399 return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
1400}
1401#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
1402#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
1403static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
1404{
1405 return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
1406}
1407#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
1408#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
1409#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
1410static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
1411{
1412 return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
1413}
1414
1415static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
1416
1417static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
1418#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xfffffff0
1419#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 4
1420static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
1421{
1422 return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
1423}
1424
1425static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
1426
1427static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
1428
1429static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
1430#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
1431#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
1432static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
1433{
1434 return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
1435}
1436#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
1437#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
1438#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
1439static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
1440{
1441 return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
1442}
1443#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
1444#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
1445static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1446{
1447 return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
1448}
1449#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
1450#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
1451static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
1452{
1453 return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
1454}
1455#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1456#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1457static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1458{
1459 return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
1460}
1461#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
1462#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1463
1464#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
1465
1466#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
1467
1468#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
1469
1470#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
1471
1472#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
1473
1474#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
1475
1476#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
1477
1478#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
1479
1480#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
1481
1482#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
1483#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
1484
1485#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
1486#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
1487#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
1488static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
1489{
1490 return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
1491}
1492#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
1493#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
1494static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
1495{
1496 return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
1497}
1498
1499#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
1500#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
1501#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
1502static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
1503{
1504 return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
1505}
1506
1507#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
1508#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
1509#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
1510static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
1511{
1512 return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
1513}
1514
1515#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
1516#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
1517#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
1518static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
1519{
1520 return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
1521}
1522
1523#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
1524#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
1525#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
1526static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
1527{
1528 return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
1529}
1530
1531#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
1532#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
1533#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
1534static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
1535{
1536 return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
1537}
1538
1539#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
1540#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
1541#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
1542static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
1543{
1544 return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
1545}
1546
1547#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
1548#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
1549#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
1550static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
1551{
1552 return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
1553}
1554#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
1555#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
1556static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
1557{
1558 return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
1559}
1560
1561#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
1562#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
1563#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
1564static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
1565{
1566 return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
1567}
1568
1569#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
1570#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
1571
1572#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
1573#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
1574#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
1575static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
1576{
1577 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
1578}
1579
1580#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
1581#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
1582#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
1583static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
1584{
1585 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
1586}
1587
1588#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
1589
1590#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
1591#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
1592#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
1593#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
1594static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
1595{
1596 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
1597}
1598#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
1599#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
1600static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
1601{
1602 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
1603}
1604
1605#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
1606#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
1607#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
1608#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
1609static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
1610{
1611 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
1612}
1613#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
1614#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
1615static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
1616{
1617 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
1618}
1619
1620#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
1621#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
1622#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
1623#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
1624static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
1625{
1626 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
1627}
1628#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
1629#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
1630static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
1631{
1632 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
1633}
1634
1635#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
1636#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
1637#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
1638#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
1639static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
1640{
1641 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
1642}
1643#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
1644#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
1645static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
1646{
1647 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
1648}
1649
1650#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
1651#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
1652#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
1653static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
1654{
1655 return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
1656}
1657
1658#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
1659#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
1660#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
1661#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
1662#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
1663#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
1664static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
1665{
1666 return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
1667}
1668#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
1669#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
1670
1671#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
1672#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
1673#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
1674static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
1675{
1676 return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
1677}
1678#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
1679#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
1680static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
1681{
1682 return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
1683}
1684#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
1685#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
1686#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
1687static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
1688{
1689 return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
1690}
1691
1692#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
1693
1694#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
1695
1696#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
1697
1698#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
1699
1700#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
1701
1702#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
1703
1704#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
1705
1706#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
1707
1708#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
1709
1710#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
1711
1712#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
1713
1714#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
1715#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
1716#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
1717static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
1718{
1719 return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
1720}
1721#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
1722#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
1723#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
1724#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
1725#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
1726#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
1727static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
1728{
1729 return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
1730}
1731#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
1732#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
1733#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
1734#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
1735
1736#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
1737#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
1738#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
1739static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
1740{
1741 return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
1742}
1743#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1744#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1745#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
1746
1747#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
1748#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
1749#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
1750static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1751{
1752 return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
1753}
1754
1755#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
1756#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
1757#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
1758static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
1759{
1760 return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
1761}
1762
1763#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
1764#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1765#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1766static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1767{
1768 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
1769}
1770#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1771#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1772static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1773{
1774 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1775}
1776#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1777#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1778static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1779{
1780 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1781}
1782#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1783#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1784static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1785{
1786 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
1787}
1788
1789#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
1790#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1791#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1792static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1793{
1794 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
1795}
1796#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1797#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1798static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1799{
1800 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1801}
1802#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1803#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1804static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1805{
1806 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1807}
1808#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1809#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1810static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1811{
1812 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
1813}
1814
1815#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
1816#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1817#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1818static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1819{
1820 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
1821}
1822#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1823#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1824static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1825{
1826 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1827}
1828#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1829#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1830static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1831{
1832 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1833}
1834#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1835#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1836static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1837{
1838 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
1839}
1840
1841#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
1842#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1843#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1844static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1845{
1846 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
1847}
1848#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1849#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1850static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1851{
1852 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1853}
1854#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1855#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1856static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1857{
1858 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1859}
1860#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1861#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1862static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1863{
1864 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
1865}
1866
1867#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
1868#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1869#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1870static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1871{
1872 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
1873}
1874#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1875#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1876static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1877{
1878 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1879}
1880#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1881#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1882static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1883{
1884 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1885}
1886#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1887#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1888static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1889{
1890 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
1891}
1892
1893#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
1894
1895#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
1896#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
1897
1898#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
1899
1900#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
1901
1902#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
1903
1904#define REG_A4XX_PC_BIN_BASE 0x000021c0
1905
1906#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
1907#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001
1908#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1909#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
1910
1911#define REG_A4XX_UNKNOWN_21C5 0x000021c5
1912
1913#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
1914
1915#define REG_A4XX_PC_GS_PARAM 0x000021e5
1916
1917#define REG_A4XX_PC_HS_PARAM 0x000021e7
1918
1919#define REG_A4XX_VBIF_VERSION 0x00003000
1920
1921#define REG_A4XX_VBIF_CLKON 0x00003001
1922#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
1923
1924#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
1925
1926#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
1927
1928#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
1929
1930#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
1931
1932#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
1933
1934#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
1935
1936#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
1937
1938#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
1939
1940#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
1941
1942#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
1943
1944#define REG_A4XX_UNKNOWN_0D01 0x00000d01
1945
1946#define REG_A4XX_UNKNOWN_0E05 0x00000e05
1947
1948#define REG_A4XX_UNKNOWN_0E42 0x00000e42
1949
1950#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
1951
1952#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3
1953
1954#define REG_A4XX_UNKNOWN_0F03 0x00000f03
1955
1956#define REG_A4XX_UNKNOWN_2001 0x00002001
1957
1958#define REG_A4XX_UNKNOWN_209B 0x0000209b
1959
1960#define REG_A4XX_UNKNOWN_20EF 0x000020ef
1961
1962#define REG_A4XX_UNKNOWN_20F0 0x000020f0
1963
1964#define REG_A4XX_UNKNOWN_20F1 0x000020f1
1965
1966#define REG_A4XX_UNKNOWN_20F2 0x000020f2
1967
1968#define REG_A4XX_UNKNOWN_20F3 0x000020f3
1969
1970#define REG_A4XX_UNKNOWN_20F4 0x000020f4
1971
1972#define REG_A4XX_UNKNOWN_20F5 0x000020f5
1973
1974#define REG_A4XX_UNKNOWN_20F6 0x000020f6
1975
1976#define REG_A4XX_UNKNOWN_20F7 0x000020f7
1977
1978#define REG_A4XX_UNKNOWN_2152 0x00002152
1979
1980#define REG_A4XX_UNKNOWN_2153 0x00002153
1981
1982#define REG_A4XX_UNKNOWN_2154 0x00002154
1983
1984#define REG_A4XX_UNKNOWN_2155 0x00002155
1985
1986#define REG_A4XX_UNKNOWN_2156 0x00002156
1987
1988#define REG_A4XX_UNKNOWN_2157 0x00002157
1989
1990#define REG_A4XX_UNKNOWN_21C3 0x000021c3
1991
1992#define REG_A4XX_UNKNOWN_21E6 0x000021e6
1993
1994#define REG_A4XX_UNKNOWN_2209 0x00002209
1995
1996#define REG_A4XX_UNKNOWN_22D7 0x000022d7
1997
1998#define REG_A4XX_UNKNOWN_2381 0x00002381
1999
2000#define REG_A4XX_UNKNOWN_23A0 0x000023a0
2001
2002#define REG_A4XX_TEX_SAMP_0 0x00000000
2003#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
2004#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
2005static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
2006{
2007 return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
2008}
2009#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
2010#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
2011static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
2012{
2013 return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
2014}
2015#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
2016#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
2017static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
2018{
2019 return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
2020}
2021#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
2022#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
2023static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
2024{
2025 return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
2026}
2027#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
2028#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
2029static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
2030{
2031 return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
2032}
2033
2034#define REG_A4XX_TEX_SAMP_1 0x00000001
2035#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
2036#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
2037static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
2038{
2039 return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
2040}
2041#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
2042#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
2043static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
2044{
2045 return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
2046}
2047#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
2048#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
2049static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
2050{
2051 return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
2052}
2053
2054#define REG_A4XX_TEX_CONST_0 0x00000000
2055#define A4XX_TEX_CONST_0_TILED 0x00000001
2056#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
2057#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
2058static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
2059{
2060 return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
2061}
2062#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
2063#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
2064static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
2065{
2066 return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
2067}
2068#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
2069#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
2070static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
2071{
2072 return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
2073}
2074#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
2075#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
2076static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
2077{
2078 return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
2079}
2080#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2081#define A4XX_TEX_CONST_0_FMT__SHIFT 22
2082static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
2083{
2084 return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
2085}
2086#define A4XX_TEX_CONST_0_TYPE__MASK 0x60000000
2087#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
2088static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
2089{
2090 return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
2091}
2092
2093#define REG_A4XX_TEX_CONST_1 0x00000001
2094#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
2095#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
2096static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
2097{
2098 return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
2099}
2100#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
2101#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
2102static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
2103{
2104 return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
2105}
2106
2107#define REG_A4XX_TEX_CONST_2 0x00000002
2108#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
2109#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
2110static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
2111{
2112 return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
2113}
2114#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
2115#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
2116static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2117{
2118 return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
2119}
2120
2121#define REG_A4XX_TEX_CONST_3 0x00000003
2122#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f
2123#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
2124static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
2125{
2126 return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
2127}
2128
2129#define REG_A4XX_TEX_CONST_4 0x00000004
2130#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff
2131#define A4XX_TEX_CONST_4_BASE__SHIFT 0
2132static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
2133{
2134 return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
2135}
2136
2137#define REG_A4XX_TEX_CONST_5 0x00000005
2138
2139#define REG_A4XX_TEX_CONST_6 0x00000006
2140
2141#define REG_A4XX_TEX_CONST_7 0x00000007
2142
2143
2144#endif /* A4XX_XML */
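Each bitfield in the generated header above comes as a __MASK/__SHIFT pair plus an inline packer that shifts the value into place and masks off overflow; fields that share a register are simply OR'd together. A minimal sketch of how the helpers compose (illustrative only, assuming a4xx.xml.h and the fui() float-bits helper its viewport packers rely on are available on the include path):

#include <stdint.h>
#include <stdio.h>
#include "a4xx.xml.h"

int main(void)
{
	/* bottom-right corner of a 1920x1080 window scissor: each field
	 * is shifted/masked by its helper, then the fields are OR'd */
	uint32_t br = A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(1919) |
			A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(1079);

	printf("GRAS_SC_WINDOW_SCISSOR_BR = 0x%08x\n", br); /* 0x0437077f */
	return 0;
}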
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 000000000000..91221836c5ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,604 @@
1/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include "a4xx_gpu.h"
14#ifdef CONFIG_MSM_OCMEM
15# include <soc/qcom/ocmem.h>
16#endif
17
18#define A4XX_INT0_MASK \
19 (A4XX_INT0_RBBM_AHB_ERROR | \
20 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
21 A4XX_INT0_CP_T0_PACKET_IN_IB | \
22 A4XX_INT0_CP_OPCODE_ERROR | \
23 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
24 A4XX_INT0_CP_HW_FAULT | \
25 A4XX_INT0_CP_IB1_INT | \
26 A4XX_INT0_CP_IB2_INT | \
27 A4XX_INT0_CP_RB_INT | \
28 A4XX_INT0_CP_REG_PROTECT_FAULT | \
29 A4XX_INT0_CP_AHB_ERROR_HALT | \
30 A4XX_INT0_UCHE_OOB_ACCESS)
31
32extern bool hang_debug;
33static void a4xx_dump(struct msm_gpu *gpu);
34
35/*
36 * a4xx_enable_hwcg() - Program the clock control registers
37 * @gpu: The GPU device pointer
38 */
39static void a4xx_enable_hwcg(struct msm_gpu *gpu)
40{
41 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
42 unsigned int i;
43 for (i = 0; i < 4; i++)
44 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
45 for (i = 0; i < 4; i++)
46 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
47 for (i = 0; i < 4; i++)
48 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
49 for (i = 0; i < 4; i++)
50 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
51 for (i = 0; i < 4; i++)
52 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
53 for (i = 0; i < 4; i++)
54 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
55 for (i = 0; i < 4; i++)
56 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
57 for (i = 0; i < 4; i++)
58 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
59 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
60 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
61 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
62 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
63 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
64 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
65 for (i = 0; i < 4; i++)
66 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
67
68 /* Disable L1 clocking in A420 due to CCU issues with it */
69 for (i = 0; i < 4; i++) {
70 if (adreno_is_a420(adreno_gpu)) {
71 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
72 0x00002020);
73 } else {
74 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
75 0x00022020);
76 }
77 }
78
79 for (i = 0; i < 4; i++) {
80 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
81 0x00000922);
82 }
83
84 for (i = 0; i < 4; i++) {
85 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
86 0x00000000);
87 }
88
89 for (i = 0; i < 4; i++) {
90 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
91 0x00000001);
92 }
93
94 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
95 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
96 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
97 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
98 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
99 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
100 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
101 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
102 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
103 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
104 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
105 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
106 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
107 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
108}
109
110static void a4xx_me_init(struct msm_gpu *gpu)
111{
112 struct msm_ringbuffer *ring = gpu->rb;
113
114 OUT_PKT3(ring, CP_ME_INIT, 17);
115 OUT_RING(ring, 0x000003f7);
116 OUT_RING(ring, 0x00000000);
117 OUT_RING(ring, 0x00000000);
118 OUT_RING(ring, 0x00000000);
119 OUT_RING(ring, 0x00000080);
120 OUT_RING(ring, 0x00000100);
121 OUT_RING(ring, 0x00000180);
122 OUT_RING(ring, 0x00006600);
123 OUT_RING(ring, 0x00000150);
124 OUT_RING(ring, 0x0000014e);
125 OUT_RING(ring, 0x00000154);
126 OUT_RING(ring, 0x00000001);
127 OUT_RING(ring, 0x00000000);
128 OUT_RING(ring, 0x00000000);
129 OUT_RING(ring, 0x00000000);
130 OUT_RING(ring, 0x00000000);
131 OUT_RING(ring, 0x00000000);
132
133 gpu->funcs->flush(gpu);
134 gpu->funcs->idle(gpu);
135}
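/*
 * Illustrative sketch (not from this commit): OUT_PKT3() above is
 * assumed to emit the usual adreno type-3 packet header ahead of the
 * payload dwords, roughly:
 *
 *	OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xff) << 8));
 *
 * so CP_ME_INIT lands in the ring as one header dword followed by the
 * 17 payload dwords written by the OUT_RING() calls above.
 */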
136
137static int a4xx_hw_init(struct msm_gpu *gpu)
138{
139 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
141 uint32_t *ptr, len;
142 int i, ret;
143
144 if (adreno_is_a4xx(adreno_gpu)) {
145 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
146 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
147 gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
148 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
149 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
150 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
151 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
152 gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
153 } else {
154 BUG();
155 }
156
157 /* Make all blocks contribute to the GPU BUSY perf counter */
158 gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
159
160 /* Tune the hysteresis counters for SP and CP idle detection */
161 gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
162 gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
163
164 /* Enable the RBBM error reporting bits */
165 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
166
167 /* Enable AHB error reporting */
168 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
169
170 /* Enable power counters */
171 gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
172
173 /*
174 * Turn on hang detection - this spews a lot of useful information
175 * into the RBBM registers on a hang:
176 */
177 gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
178 (1 << 30) | 0xFFFF);
179
180 gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
181 (unsigned int)(a4xx_gpu->ocmem_base >> 14));
182
183 /* Turn on performance counters: */
184 gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
185
186 /* Disable L2 bypass to avoid UCHE out of bounds errors */
187 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
188 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
189
190 gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
191 (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
192
193 a4xx_enable_hwcg(gpu);
194
195 /*
196 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
197 * due to timing issue with HLSQ_TP_CLK_EN
198 */
199 if (adreno_is_a420(adreno_gpu)) {
200 unsigned int val;
201 val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
202 val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
203 val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
204 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
205 }
206
207 ret = adreno_hw_init(gpu);
208 if (ret)
209 return ret;
210
211 /* setup access protection: */
212 gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
213
214 /* RBBM registers */
215 gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
216 gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
217 gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
218 gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
219 gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
220 gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
221
222 /* CP registers */
223 gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
224 gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
225
226
227 /* RB registers */
228 gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
229
230 /* HLSQ registers */
231 gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
232
233 /* VPC registers */
234 gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
235
236 /* SMMU registers */
237 gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
238
239 gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
240
241 ret = adreno_hw_init(gpu);
242 if (ret)
243 return ret;
244
245 /* Load PM4: */
246 ptr = (uint32_t *)(adreno_gpu->pm4->data);
247 len = adreno_gpu->pm4->size / 4;
248 DBG("loading PM4 ucode version: %u", ptr[0]);
249 gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
250 for (i = 1; i < len; i++)
251 gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
252
253 /* Load PFP: */
254 ptr = (uint32_t *)(adreno_gpu->pfp->data);
255 len = adreno_gpu->pfp->size / 4;
256 DBG("loading PFP ucode version: %u", ptr[0]);
257
258 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
259 for (i = 1; i < len; i++)
260 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
261
262 /* clear ME_HALT to start micro engine */
263 gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
264
265 a4xx_me_init(gpu);
266 return 0;
267}
268
269static void a4xx_recover(struct msm_gpu *gpu)
270{
271 /* dump registers before resetting gpu, if enabled: */
272 if (hang_debug)
273 a4xx_dump(gpu);
274
275 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
276 gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
277 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
278 adreno_recover(gpu);
279}
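/*
 * Note on the reset sequence above (assumed intent): the read-back of
 * RBBM_SW_RESET_CMD between the two writes acts as a barrier that
 * posts the "1" write to the hardware before the reset bit is cleared
 * again.
 */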
280
281static void a4xx_destroy(struct msm_gpu *gpu)
282{
283 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
284 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
285
286 DBG("%s", gpu->name);
287
288 adreno_gpu_cleanup(adreno_gpu);
289
290#ifdef CONFIG_MSM_OCMEM
291 if (a4xx_gpu->ocmem_base)
292 ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
293#endif
294
295 kfree(a4xx_gpu);
296}
297
298static void a4xx_idle(struct msm_gpu *gpu)
299{
300 /* wait for ringbuffer to drain: */
301 adreno_idle(gpu);
302
303 /* then wait for GPU to finish: */
304 if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
305 A4XX_RBBM_STATUS_GPU_BUSY)))
306 DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
307
308 /* TODO maybe we need to reset GPU here to recover from hang? */
309}
310
311static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
312{
313 uint32_t status;
314
315 status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
316 DBG("%s: Int status %08x", gpu->name, status);
317
318 gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
319
320 msm_gpu_retire(gpu);
321
322 return IRQ_HANDLED;
323}
324
325static const unsigned int a4xx_registers[] = {
326 /* RBBM */
327 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
328 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
329 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
330 /* CP */
331 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
332 0x0578, 0x058F,
333 /* VSC */
334 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
335 /* GRAS */
336 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
337 /* RB */
338 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
339 /* PC */
340 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
341 /* VFD */
342 0x0E40, 0x0E4A,
343 /* VPC */
344 0x0E60, 0x0E61, 0x0E63, 0x0E68,
345 /* UCHE */
346 0x0E80, 0x0E84, 0x0E88, 0x0E95,
347 /* VMIDMT */
348 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
349 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
350 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
351 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
352 0x1380, 0x1380,
353 /* GRAS CTX 0 */
354 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
355 /* PC CTX 0 */
356 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
357 /* VFD CTX 0 */
358 0x2200, 0x2204, 0x2208, 0x22A9,
359 /* GRAS CTX 1 */
360 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
361 /* PC CTX 1 */
362 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
363 /* VFD CTX 1 */
364 0x2600, 0x2604, 0x2608, 0x26A9,
365 /* XPU */
366 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
367 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
368 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
369 /* VBIF */
370 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
371 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
372 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
373 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
374 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
375 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
376 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
377 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
378 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
379 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
380 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
381 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
382 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
383 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
384 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
385 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
386 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
387 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
388 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
389 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
390 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
391 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
392 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
393 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
394 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
395 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
396 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
397 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
398 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
399 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
400 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
401 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
402 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
403 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
404 ~0 /* sentinel */
405};
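/*
 * The table above is a list of (start, end) register pairs terminated
 * by the ~0 sentinel. A sketch of how a dumper is assumed to walk it
 * (the real iteration lives in adreno_dump()):
 *
 *	for (i = 0; registers[i] != ~0; i += 2)
 *		for (addr = registers[i]; addr <= registers[i + 1]; addr++)
 *			printk("IO:R %08x %08x\n", addr << 2, gpu_read(gpu, addr));
 */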
406
407#ifdef CONFIG_DEBUG_FS
408static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
409{
410 gpu->funcs->pm_resume(gpu);
411
412 seq_printf(m, "status: %08x\n",
413 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
414 gpu->funcs->pm_suspend(gpu);
415
416 adreno_show(gpu, m);
417
418}
419#endif
420
421/* Register offset defines for A4XX, in order of enum adreno_regs */
422static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
423 REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
424 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
425 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
426 REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
427 REG_A4XX_CP_PFP_UCODE_DATA),
428 REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
429 REG_A4XX_CP_PFP_UCODE_ADDR),
430 REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
431 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
432 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
433 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
434 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
435 REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
436 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
437 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
438 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
439 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
440 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
441 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
442 REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
443 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
444 REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
445 REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
446 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
447 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
448 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
449 REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
450 REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
451 REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
452 REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
453 REG_A4XX_CP_PROTECT_STATUS),
454 REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
455 REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
456 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
457 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
458 REG_A4XX_RBBM_PERFCTR_CTL),
459 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
460 REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
461 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
462 REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
463 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
464 REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
465 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
466 REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
467 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
468 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
469 REG_A4XX_RBBM_INT_0_STATUS),
470 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
471 REG_A4XX_RBBM_AHB_ERROR_STATUS),
472 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
473 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
474 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
475 REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
476 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
477 REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
478 REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
479 REG_A4XX_VPC_DEBUG_RAM_SEL),
480 REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
481 REG_A4XX_VPC_DEBUG_RAM_READ),
482 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
483 REG_A4XX_RBBM_INT_CLEAR_CMD),
484 REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
485 REG_A4XX_VSC_SIZE_ADDRESS),
486 REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
487 REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
488 REG_A4XX_SP_VS_PVT_MEM_ADDR),
489 REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
490 REG_A4XX_SP_FS_PVT_MEM_ADDR),
491 REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
492 REG_A4XX_SP_VS_OBJ_START),
493 REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
494 REG_A4XX_SP_FS_OBJ_START),
495 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
496 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
497 REG_A4XX_RBBM_SW_RESET_CMD),
498 REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
499 REG_A4XX_UCHE_INVALIDATE0),
500 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
501 REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
502 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
503 REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
504};
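/*
 * Sketch of the indirection this table feeds (assumed from context:
 * REG_ADRENO_DEFINE() is taken to store "register + 1" so that a zero
 * slot can mean "not present on this generation"):
 *
 *	static inline void adreno_gpu_write(struct adreno_gpu *gpu,
 *			enum adreno_regs reg, uint32_t data)
 *	{
 *		uint32_t off = gpu->reg_offsets[reg];
 *		if (off)
 *			gpu_write(&gpu->base, off - 1, data);
 *	}
 */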
505
506static void a4xx_dump(struct msm_gpu *gpu)
507{
508 adreno_dump(gpu);
509 printk("status: %08x\n",
510 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
511 adreno_dump(gpu);
512}
513
514static const struct adreno_gpu_funcs funcs = {
515 .base = {
516 .get_param = adreno_get_param,
517 .hw_init = a4xx_hw_init,
518 .pm_suspend = msm_gpu_pm_suspend,
519 .pm_resume = msm_gpu_pm_resume,
520 .recover = a4xx_recover,
521 .last_fence = adreno_last_fence,
522 .submit = adreno_submit,
523 .flush = adreno_flush,
524 .idle = a4xx_idle,
525 .irq = a4xx_irq,
526 .destroy = a4xx_destroy,
527#ifdef CONFIG_DEBUG_FS
528 .show = a4xx_show,
529#endif
530 },
531};
532
533struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
534{
535 struct a4xx_gpu *a4xx_gpu = NULL;
536 struct adreno_gpu *adreno_gpu;
537 struct msm_gpu *gpu;
538 struct msm_drm_private *priv = dev->dev_private;
539 struct platform_device *pdev = priv->gpu_pdev;
540 int ret;
541
542 if (!pdev) {
543 dev_err(dev->dev, "no a4xx device\n");
544 ret = -ENXIO;
545 goto fail;
546 }
547
548 a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
549 if (!a4xx_gpu) {
550 ret = -ENOMEM;
551 goto fail;
552 }
553
554 adreno_gpu = &a4xx_gpu->base;
555 gpu = &adreno_gpu->base;
556
557 a4xx_gpu->pdev = pdev;
558
559 gpu->perfcntrs = NULL;
560 gpu->num_perfcntrs = 0;
561
562 adreno_gpu->registers = a4xx_registers;
563 adreno_gpu->reg_offsets = a4xx_register_offsets;
564
565 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
566 if (ret)
567 goto fail;
568
569 /* if needed, allocate gmem: */
570 if (adreno_is_a4xx(adreno_gpu)) {
571#ifdef CONFIG_MSM_OCMEM
572 /* TODO this is different/missing upstream: */
573 struct ocmem_buf *ocmem_hdl =
574 ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
575
576 a4xx_gpu->ocmem_hdl = ocmem_hdl;
577 a4xx_gpu->ocmem_base = ocmem_hdl->addr;
578 adreno_gpu->gmem = ocmem_hdl->len;
579 DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
580 a4xx_gpu->ocmem_base);
581#endif
582 }
583
584 if (!gpu->mmu) {
585 /* TODO we think it is possible to configure the GPU to
586 * restrict access to VRAM carveout. But the required
587 * registers are unknown. For now just bail out and
588 * limp along with just modesetting. If it turns out
589 * to not be possible to restrict access, then we must
590 * implement a cmdstream validator.
591 */
592 dev_err(dev->dev, "No memory protection without IOMMU\n");
593 ret = -ENXIO;
594 goto fail;
595 }
596
597 return gpu;
598
599fail:
600 if (a4xx_gpu)
601 a4xx_destroy(&a4xx_gpu->base.base);
602
603 return ERR_PTR(ret);
604}
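Note that a4xx_gpu_init() never returns NULL: on failure it hands back ERR_PTR(ret), so callers are expected to test the result with IS_ERR(). A hypothetical caller-side sketch:

	struct msm_gpu *gpu = a4xx_gpu_init(dev);
	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a4xx gpu\n");
		gpu = NULL;
	}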
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 000000000000..01247204ac92
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,34 @@
1/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __A4XX_GPU_H__
14#define __A4XX_GPU_H__
15
16#include "adreno_gpu.h"
17
18/* arrg, somehow fb.h is getting pulled in: */
19#undef ROP_COPY
20#undef ROP_XOR
21
22#include "a4xx.xml.h"
23
24struct a4xx_gpu {
25 struct adreno_gpu base;
26 struct platform_device *pdev;
27
28 /* if OCMEM is used for GMEM: */
29 uint32_t ocmem_base;
30 void *ocmem_hdl;
31};
32#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
33
34#endif /* __A4XX_GPU_H__ */
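struct a4xx_gpu embeds struct adreno_gpu, which in turn embeds struct msm_gpu, so to_a4xx_gpu() is a container_of() downcast from the embedded base. A self-contained sketch of the pattern, using hypothetical stand-in structs rather than the real msm types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msm_gpu { const char *name; };
struct adreno_gpu { struct msm_gpu base; };
struct a4xx_gpu { struct adreno_gpu base; };

int main(void)
{
	struct a4xx_gpu a4xx = { .base.base.name = "a4xx" };
	struct msm_gpu *gpu = &a4xx.base.base;

	/* walk back out: msm_gpu -> adreno_gpu -> a4xx_gpu */
	struct adreno_gpu *adreno = container_of(gpu, struct adreno_gpu, base);
	struct a4xx_gpu *a4 = container_of(adreno, struct a4xx_gpu, base);

	printf("%s back at original: %d\n", a4->base.base.name, a4 == &a4xx);
	return 0;
}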
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index cc341bc62b51..a4b33af9338d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
 enum adreno_rb_depth_format {
 	DEPTHX_16 = 0,
 	DEPTHX_24_8 = 1,
+	DEPTHX_32 = 2,
 };
 
 enum adreno_rb_copy_control_mode {
@@ -132,6 +133,7 @@ enum a3xx_threadmode {
 };
 
 enum a3xx_instrbuffermode {
+	CACHE = 0,
 	BUFFER = 1,
 };
 
@@ -140,6 +142,13 @@ enum a3xx_threadsize {
 	FOUR_QUADS = 1,
 };
 
+enum a3xx_color_swap {
+	WZYX = 0,
+	WXYZ = 1,
+	ZYXW = 2,
+	XYZW = 3,
+};
+
 #define REG_AXXX_CP_RB_BASE 0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7ab85af3a7db..be83dee83d08 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013-2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
 module_param_named(hang_debug, hang_debug, bool, 0600);
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
 	{
@@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
 		.pfpfw = "a330_pfp.fw",
 		.gmem = SZ_1M,
 		.init = a3xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(4, 2, 0, ANY_ID),
+		.revn = 420,
+		.name = "A420",
+		.pm4fw = "a420_pm4.fw",
+		.pfpfw = "a420_pfp.fw",
+		.gmem = (SZ_1M + SZ_512K),
+		.init = a4xx_gpu_init,
 	},
 };
 
@@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
 MODULE_FIRMWARE("a300_pfp.fw");
 MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
+MODULE_FIRMWARE("a420_pm4.fw");
+MODULE_FIRMWARE("a420_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6afa29167fee..aa873048308b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
63 } 65 }
64 66
65 /* Setup REG_CP_RB_CNTL: */ 67 /* Setup REG_CP_RB_CNTL: */
66 gpu_write(gpu, REG_AXXX_CP_RB_CNTL, 68 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
67 /* size is log2(quad-words): */ 69 /* size is log2(quad-words): */
68 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | 70 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
69 AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8))); 71 AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
70 72
71 /* Setup ringbuffer address: */ 73 /* Setup ringbuffer address: */
72 gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); 74 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
73 gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr)); 75 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
76 rbmemptr(adreno_gpu, rptr));
74 77
75 /* Setup scratch/timestamp: */ 78 /* Setup scratch/timestamp: */
76 gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence)); 79 adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
80 rbmemptr(adreno_gpu, fence));
77 81
78 gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1); 82 adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
79 83
80 return 0; 84 return 0;
81} 85}
@@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
151 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); 155 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
152 OUT_RING(ring, submit->fence); 156 OUT_RING(ring, submit->fence);
153 157
154 if (adreno_is_a3xx(adreno_gpu)) { 158 if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
155 /* Flush HLSQ lazy updates to make sure there is nothing 159 /* Flush HLSQ lazy updates to make sure there is nothing
156 * pending for indirect loads after the timestamp has 160 * pending for indirect loads after the timestamp has
157 * passed: 161 * passed:
@@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
188 192
189void adreno_flush(struct msm_gpu *gpu) 193void adreno_flush(struct msm_gpu *gpu)
190{ 194{
195 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
191 uint32_t wptr = get_wptr(gpu->rb); 196 uint32_t wptr = get_wptr(gpu->rb);
192 197
193 /* ensure writes to ringbuffer have hit system memory: */ 198 /* ensure writes to ringbuffer have hit system memory: */
194 mb(); 199 mb();
195 200
196 gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr); 201 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
197} 202}
198 203
199void adreno_idle(struct msm_gpu *gpu) 204void adreno_idle(struct msm_gpu *gpu)
@@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
319 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", 324 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
320 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); 325 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
321 326
327 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
328 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
329 RB_SIZE);
330 if (ret)
331 return ret;
332
322 ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); 333 ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
323 if (ret) { 334 if (ret) {
324 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", 335 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
333 return ret; 344 return ret;
334 } 345 }
335 346
336 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
337 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
338 RB_SIZE);
339 if (ret)
340 return ret;
341
342 mmu = gpu->mmu; 347 mmu = gpu->mmu;
343 if (mmu) { 348 if (mmu) {
344 ret = mmu->funcs->attach(mmu, iommu_ports, 349 ret = mmu->funcs->attach(mmu, iommu_ports,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 52f051579753..a0cc30977e67 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,6 +2,8 @@
2 * Copyright (C) 2013 Red Hat 2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 3 * Author: Rob Clark <robdclark@gmail.com>
4 * 4 *
5 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
6 *
5 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by 8 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation. 9 * the Free Software Foundation.
@@ -25,6 +27,81 @@
25#include "adreno_common.xml.h" 27#include "adreno_common.xml.h"
26#include "adreno_pm4.xml.h" 28#include "adreno_pm4.xml.h"
27 29
30#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
31/**
32 * adreno_regs: List of registers that are used across all
33 * 3D devices. Each device type has a different offset value for the same
34 * register, so an array of register offsets is declared for every device
35 * and is indexed by the enumeration values defined in this enum
36 */
37enum adreno_regs {
38 REG_ADRENO_CP_DEBUG,
39 REG_ADRENO_CP_ME_RAM_WADDR,
40 REG_ADRENO_CP_ME_RAM_DATA,
41 REG_ADRENO_CP_PFP_UCODE_DATA,
42 REG_ADRENO_CP_PFP_UCODE_ADDR,
43 REG_ADRENO_CP_WFI_PEND_CTR,
44 REG_ADRENO_CP_RB_BASE,
45 REG_ADRENO_CP_RB_RPTR_ADDR,
46 REG_ADRENO_CP_RB_RPTR,
47 REG_ADRENO_CP_RB_WPTR,
48 REG_ADRENO_CP_PROTECT_CTRL,
49 REG_ADRENO_CP_ME_CNTL,
50 REG_ADRENO_CP_RB_CNTL,
51 REG_ADRENO_CP_IB1_BASE,
52 REG_ADRENO_CP_IB1_BUFSZ,
53 REG_ADRENO_CP_IB2_BASE,
54 REG_ADRENO_CP_IB2_BUFSZ,
55 REG_ADRENO_CP_TIMESTAMP,
56 REG_ADRENO_CP_ME_RAM_RADDR,
57 REG_ADRENO_CP_ROQ_ADDR,
58 REG_ADRENO_CP_ROQ_DATA,
59 REG_ADRENO_CP_MERCIU_ADDR,
60 REG_ADRENO_CP_MERCIU_DATA,
61 REG_ADRENO_CP_MERCIU_DATA2,
62 REG_ADRENO_CP_MEQ_ADDR,
63 REG_ADRENO_CP_MEQ_DATA,
64 REG_ADRENO_CP_HW_FAULT,
65 REG_ADRENO_CP_PROTECT_STATUS,
66 REG_ADRENO_SCRATCH_ADDR,
67 REG_ADRENO_SCRATCH_UMSK,
68 REG_ADRENO_SCRATCH_REG2,
69 REG_ADRENO_RBBM_STATUS,
70 REG_ADRENO_RBBM_PERFCTR_CTL,
71 REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
72 REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
73 REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
74 REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
75 REG_ADRENO_RBBM_INT_0_MASK,
76 REG_ADRENO_RBBM_INT_0_STATUS,
77 REG_ADRENO_RBBM_AHB_ERROR_STATUS,
78 REG_ADRENO_RBBM_PM_OVERRIDE2,
79 REG_ADRENO_RBBM_AHB_CMD,
80 REG_ADRENO_RBBM_INT_CLEAR_CMD,
81 REG_ADRENO_RBBM_SW_RESET_CMD,
82 REG_ADRENO_RBBM_CLOCK_CTL,
83 REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
84 REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
85 REG_ADRENO_VPC_DEBUG_RAM_SEL,
86 REG_ADRENO_VPC_DEBUG_RAM_READ,
87 REG_ADRENO_VSC_SIZE_ADDRESS,
88 REG_ADRENO_VFD_CONTROL_0,
89 REG_ADRENO_VFD_INDEX_MAX,
90 REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
91 REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
92 REG_ADRENO_SP_VS_OBJ_START_REG,
93 REG_ADRENO_SP_FS_OBJ_START_REG,
94 REG_ADRENO_PA_SC_AA_CONFIG,
95 REG_ADRENO_SQ_GPR_MANAGEMENT,
96 REG_ADRENO_SQ_INST_STORE_MANAGMENT,
97 REG_ADRENO_TP0_CHICKEN,
98 REG_ADRENO_RBBM_RBBM_CTL,
99 REG_ADRENO_UCHE_INVALIDATE0,
100 REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
101 REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
102 REG_ADRENO_REGISTER_MAX,
103};
104
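(REG_ADRENO_DEFINE() above stores each offset biased by +1, so a zero slot unambiguously means "register not wired up on this GPU"; the accessors further down subtract the 1 back off before touching hardware. A sketch of how a GPU-specific file would fill in its table; the identifier names here are illustrative assumptions, not quoted from this patch:)

    static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
    	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
    	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
    	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
    	/* ... one entry per enum adreno_regs value the GPU supports ... */
    };

    /* at init time: adreno_gpu->reg_offsets = a4xx_register_offsets; */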
28struct adreno_rev { 105struct adreno_rev {
29 uint8_t core; 106 uint8_t core;
30 uint8_t major; 107 uint8_t major;
@@ -76,6 +153,13 @@ struct adreno_gpu {
76 struct adreno_rbmemptrs *memptrs; 153 struct adreno_rbmemptrs *memptrs;
77 struct drm_gem_object *memptrs_bo; 154 struct drm_gem_object *memptrs_bo;
78 uint32_t memptrs_iova; 155 uint32_t memptrs_iova;
156
157 /*
158 * Register offsets are different between some GPUs.
159 * GPU specific offsets will be exported by GPU specific
160 * code (a3xx_gpu.c) and stored in this common location.
161 */
162 const unsigned int *reg_offsets;
79}; 163};
80#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) 164#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
81 165
@@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
128 return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); 212 return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
129} 213}
130 214
215static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
216{
217 return (gpu->revn >= 400) && (gpu->revn < 500);
218}
219
220static inline int adreno_is_a420(struct adreno_gpu *gpu)
221{
222 return gpu->revn == 420;
223}
224
131int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); 225int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
132int adreno_hw_init(struct msm_gpu *gpu); 226int adreno_hw_init(struct msm_gpu *gpu);
133uint32_t adreno_last_fence(struct msm_gpu *gpu); 227uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
171 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); 265 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
172} 266}
173 267
268/**
269 * adreno_reg_check() - Checks the validity of a register enum
270 * @gpu: Pointer to struct adreno_gpu
271 * @offset_name: The register enum that is checked
272 */
273static inline bool adreno_reg_check(struct adreno_gpu *gpu,
274 enum adreno_regs offset_name)
275{
276 if (offset_name >= REG_ADRENO_REGISTER_MAX ||
277 !gpu->reg_offsets[offset_name]) {
278 BUG();
279 }
280 return true;
281}
282
283static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
284 enum adreno_regs offset_name)
285{
286 u32 reg = gpu->reg_offsets[offset_name];
287 u32 val = 0;
288	if (adreno_reg_check(gpu, offset_name))
289 val = gpu_read(&gpu->base, reg - 1);
290 return val;
291}
292
293static inline void adreno_gpu_write(struct adreno_gpu *gpu,
294 enum adreno_regs offset_name, u32 data)
295{
296 u32 reg = gpu->reg_offsets[offset_name];
297	if (adreno_reg_check(gpu, offset_name))
298 gpu_write(&gpu->base, reg - 1, data);
299}
174 300
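(Usage sketch: callers address registers through the common enum and never see per-GPU numbering; an unset table slot trips the BUG() in adreno_reg_check() rather than silently poking offset -1:)

    /* e.g. kicking the ring, as adreno_flush() above now does: */
    adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);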
175#endif /* __ADRENO_GPU_H__ */ 301#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6ef43f66c30a..6a75cee94d81 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
@@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
157 CP_IM_STORE = 44, 157 CP_IM_STORE = 44,
158 CP_SET_DRAW_INIT_FLAGS = 75, 158 CP_SET_DRAW_INIT_FLAGS = 75,
159 CP_SET_PROTECTED_MODE = 95, 159 CP_SET_PROTECTED_MODE = 95,
160 CP_BOOTSTRAP_UCODE = 111,
160 CP_LOAD_STATE = 48, 161 CP_LOAD_STATE = 48,
161 CP_COND_INDIRECT_BUFFER_PFE = 58, 162 CP_COND_INDIRECT_BUFFER_PFE = 58,
162 CP_COND_INDIRECT_BUFFER_PFD = 50, 163 CP_COND_INDIRECT_BUFFER_PFD = 50,
@@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
278#define CP_DRAW_INDX_1_NOT_EOP 0x00001000 279#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
279#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000 280#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
280#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 281#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
281#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000 282#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
282#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16 283#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
283static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val) 284static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
284{ 285{
285 return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK; 286 return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
286} 287}
287 288
288#define REG_CP_DRAW_INDX_2 0x00000002 289#define REG_CP_DRAW_INDX_2 0x00000002
@@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
293 return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK; 294 return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
294} 295}
295 296
296#define REG_CP_DRAW_INDX_2 0x00000002 297#define REG_CP_DRAW_INDX_3 0x00000003
297#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff 298#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
298#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0 299#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
299static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val) 300static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
300{ 301{
301 return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK; 302 return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
302} 303}
303 304
304#define REG_CP_DRAW_INDX_2 0x00000002 305#define REG_CP_DRAW_INDX_4 0x00000004
305#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff 306#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
306#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0 307#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
307static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val) 308static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
308{ 309{
309 return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK; 310 return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
310} 311}
311 312
312#define REG_CP_DRAW_INDX_2_0 0x00000000 313#define REG_CP_DRAW_INDX_2_0 0x00000000
@@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
345#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000 346#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
346#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000 347#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
347#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 348#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
348#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000 349#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
349#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16 350#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
350static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val) 351static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
351{ 352{
352 return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK; 353 return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
353} 354}
354 355
355#define REG_CP_DRAW_INDX_2_2 0x00000002 356#define REG_CP_DRAW_INDX_2_2 0x00000002
@@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
388#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000 389#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
389#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000 390#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
390#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000 391#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
391#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000 392#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
392#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16 393#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
393static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val) 394static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
394{ 395{
395 return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK; 396 return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
396} 397}
397 398
398#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 399#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
@@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
405 return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK; 406 return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
406} 407}
407 408
408#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 409#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
409#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff 410
410#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0 411#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
411static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val) 412#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
413#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
414static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
412{ 415{
413 return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK; 416 return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
414} 417}
415 418
416#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 419#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
417#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff 420#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
418#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0 421#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
419static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val) 422static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
420{ 423{
421 return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK; 424 return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
422} 425}
423 426
424#define REG_CP_SET_DRAW_STATE_0 0x00000000 427#define REG_CP_SET_DRAW_STATE_0 0x00000000
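(The NUM_INDICES to NUM_INSTANCES renames and the INDX_2 to INDX_3/INDX_4 renumbering reflect that dword 1 of these packets carries an instance count, in bits 31:24 for CP_DRAW_INDX and bits 31:16 for CP_DRAW_INDX_OFFSET, with the index-buffer base/size moving to later dwords. A sketch of an auto-indexed, single-instance draw using the updated helpers; the PRIM_TYPE/SOURCE_SELECT helpers and DI_* enums are assumed from the same generated headers:)

    OUT_PKT3(ring, CP_DRAW_INDX, 3);
    OUT_RING(ring, 0x00000000);        /* viz query info */
    OUT_RING(ring, CP_DRAW_INDX_1_PRIM_TYPE(DI_PT_TRILIST) |
    		CP_DRAW_INDX_1_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
    		CP_DRAW_INDX_1_NUM_INSTANCES(1));
    OUT_RING(ring, CP_DRAW_INDX_2_NUM_INDICES(vertex_count));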
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index e965898dfda6..448438b759b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index f2bdda957205..c102a7f074ac 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index e5b071ffd865..a900134bdf33 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9d00dcba6959..062c68725376 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/of_irq.h>
18#include "hdmi.h" 19#include "hdmi.h"
19 20
20void hdmi_set_mode(struct hdmi *hdmi, bool power_on) 21void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
39 power_on ? "Enable" : "Disable", ctrl); 40 power_on ? "Enable" : "Disable", ctrl);
40} 41}
41 42
42irqreturn_t hdmi_irq(int irq, void *dev_id) 43static irqreturn_t hdmi_irq(int irq, void *dev_id)
43{ 44{
44 struct hdmi *hdmi = dev_id; 45 struct hdmi *hdmi = dev_id;
45 46
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
54 return IRQ_HANDLED; 55 return IRQ_HANDLED;
55} 56}
56 57
57void hdmi_destroy(struct kref *kref) 58static void hdmi_destroy(struct hdmi *hdmi)
58{ 59{
59 struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
60 struct hdmi_phy *phy = hdmi->phy; 60 struct hdmi_phy *phy = hdmi->phy;
61 61
62 if (phy) 62 if (phy)
@@ -68,37 +68,24 @@ void hdmi_destroy(struct kref *kref)
68 platform_set_drvdata(hdmi->pdev, NULL); 68 platform_set_drvdata(hdmi->pdev, NULL);
69} 69}
70 70
71/* initialize connector */ 71/* construct hdmi at bind/probe time, grab all the resources. If
72struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) 72 * we are going to EPROBE_DEFER, we want to do it here, rather than later
73 * at modeset_init() time
74 */
75static struct hdmi *hdmi_init(struct platform_device *pdev)
73{ 76{
77 struct hdmi_platform_config *config = pdev->dev.platform_data;
74 struct hdmi *hdmi = NULL; 78 struct hdmi *hdmi = NULL;
75 struct msm_drm_private *priv = dev->dev_private;
76 struct platform_device *pdev = priv->hdmi_pdev;
77 struct hdmi_platform_config *config;
78 int i, ret; 79 int i, ret;
79 80
80 if (!pdev) { 81 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
81 dev_err(dev->dev, "no hdmi device\n");
82 ret = -ENXIO;
83 goto fail;
84 }
85
86 config = pdev->dev.platform_data;
87
88 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
89 if (!hdmi) { 82 if (!hdmi) {
90 ret = -ENOMEM; 83 ret = -ENOMEM;
91 goto fail; 84 goto fail;
92 } 85 }
93 86
94 kref_init(&hdmi->refcount);
95
96 hdmi->dev = dev;
97 hdmi->pdev = pdev; 87 hdmi->pdev = pdev;
98 hdmi->config = config; 88 hdmi->config = config;
99 hdmi->encoder = encoder;
100
101 hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
102 89
103 /* not sure about which phy maps to which msm.. probably I miss some */ 90 /* not sure about which phy maps to which msm.. probably I miss some */
104 if (config->phy_init) 91 if (config->phy_init)
@@ -108,7 +95,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
108 95
109 if (IS_ERR(hdmi->phy)) { 96 if (IS_ERR(hdmi->phy)) {
110 ret = PTR_ERR(hdmi->phy); 97 ret = PTR_ERR(hdmi->phy);
111 dev_err(dev->dev, "failed to load phy: %d\n", ret); 98 dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
112 hdmi->phy = NULL; 99 hdmi->phy = NULL;
113 goto fail; 100 goto fail;
114 } 101 }
@@ -127,7 +114,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
127 config->hpd_reg_names[i]); 114 config->hpd_reg_names[i]);
128 if (IS_ERR(reg)) { 115 if (IS_ERR(reg)) {
129 ret = PTR_ERR(reg); 116 ret = PTR_ERR(reg);
130 dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n", 117 dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
131 config->hpd_reg_names[i], ret); 118 config->hpd_reg_names[i], ret);
132 goto fail; 119 goto fail;
133 } 120 }
@@ -143,7 +130,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
143 config->pwr_reg_names[i]); 130 config->pwr_reg_names[i]);
144 if (IS_ERR(reg)) { 131 if (IS_ERR(reg)) {
145 ret = PTR_ERR(reg); 132 ret = PTR_ERR(reg);
146 dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n", 133 dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
147 config->pwr_reg_names[i], ret); 134 config->pwr_reg_names[i], ret);
148 goto fail; 135 goto fail;
149 } 136 }
@@ -158,7 +145,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
158 clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]); 145 clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
159 if (IS_ERR(clk)) { 146 if (IS_ERR(clk)) {
160 ret = PTR_ERR(clk); 147 ret = PTR_ERR(clk);
161 dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n", 148 dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
162 config->hpd_clk_names[i], ret); 149 config->hpd_clk_names[i], ret);
163 goto fail; 150 goto fail;
164 } 151 }
@@ -173,7 +160,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
173 clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]); 160 clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
174 if (IS_ERR(clk)) { 161 if (IS_ERR(clk)) {
175 ret = PTR_ERR(clk); 162 ret = PTR_ERR(clk);
176 dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n", 163 dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
177 config->pwr_clk_names[i], ret); 164 config->pwr_clk_names[i], ret);
178 goto fail; 165 goto fail;
179 } 166 }
@@ -184,11 +171,40 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
184 hdmi->i2c = hdmi_i2c_init(hdmi); 171 hdmi->i2c = hdmi_i2c_init(hdmi);
185 if (IS_ERR(hdmi->i2c)) { 172 if (IS_ERR(hdmi->i2c)) {
186 ret = PTR_ERR(hdmi->i2c); 173 ret = PTR_ERR(hdmi->i2c);
187 dev_err(dev->dev, "failed to get i2c: %d\n", ret); 174 dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
188 hdmi->i2c = NULL; 175 hdmi->i2c = NULL;
189 goto fail; 176 goto fail;
190 } 177 }
191 178
179 return hdmi;
180
181fail:
182 if (hdmi)
183 hdmi_destroy(hdmi);
184
185 return ERR_PTR(ret);
186}
187
188/* Second part of initialization, the drm/kms level modeset_init,
189 * which constructs/initializes mode objects, etc, is called from the
190 * master driver (not the hdmi sub-device's probe/bind!)
191 *
192 * Any resource (regulator/clk/etc) which could be missing at boot
193 * should be handled in hdmi_init() so that failure happens from
194 * hdmi sub-device's probe.
195 */
196int hdmi_modeset_init(struct hdmi *hdmi,
197 struct drm_device *dev, struct drm_encoder *encoder)
198{
199 struct msm_drm_private *priv = dev->dev_private;
200 struct platform_device *pdev = hdmi->pdev;
201 int ret;
202
203 hdmi->dev = dev;
204 hdmi->encoder = encoder;
205
206 hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
207
192 hdmi->bridge = hdmi_bridge_init(hdmi); 208 hdmi->bridge = hdmi_bridge_init(hdmi);
193 if (IS_ERR(hdmi->bridge)) { 209 if (IS_ERR(hdmi->bridge)) {
194 ret = PTR_ERR(hdmi->bridge); 210 ret = PTR_ERR(hdmi->bridge);
@@ -205,22 +221,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
205 goto fail; 221 goto fail;
206 } 222 }
207 223
208 if (!config->shared_irq) { 224 hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
209 hdmi->irq = platform_get_irq(pdev, 0); 225 if (hdmi->irq < 0) {
210 if (hdmi->irq < 0) { 226 ret = hdmi->irq;
211 ret = hdmi->irq; 227 dev_err(dev->dev, "failed to get irq: %d\n", ret);
212 dev_err(dev->dev, "failed to get irq: %d\n", ret); 228 goto fail;
213 goto fail; 229 }
214 }
215 230
216 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, 231 ret = devm_request_irq(&pdev->dev, hdmi->irq,
217 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 232 hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
218 "hdmi_isr", hdmi); 233 "hdmi_isr", hdmi);
219 if (ret < 0) { 234 if (ret < 0) {
220 dev_err(dev->dev, "failed to request IRQ%u: %d\n", 235 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
221 hdmi->irq, ret); 236 hdmi->irq, ret);
222 goto fail; 237 goto fail;
223 }
224 } 238 }
225 239
226 encoder->bridge = hdmi->bridge; 240 encoder->bridge = hdmi->bridge;
@@ -230,19 +244,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
230 244
231 platform_set_drvdata(pdev, hdmi); 245 platform_set_drvdata(pdev, hdmi);
232 246
233 return hdmi; 247 return 0;
234 248
235fail: 249fail:
236 if (hdmi) { 250 /* bridge/connector are normally destroyed by drm: */
237 /* bridge/connector are normally destroyed by drm: */ 251 if (hdmi->bridge) {
238 if (hdmi->bridge) 252 hdmi->bridge->funcs->destroy(hdmi->bridge);
239 hdmi->bridge->funcs->destroy(hdmi->bridge); 253 hdmi->bridge = NULL;
240 if (hdmi->connector) 254 }
241 hdmi->connector->funcs->destroy(hdmi->connector); 255 if (hdmi->connector) {
242 hdmi_destroy(&hdmi->refcount); 256 hdmi->connector->funcs->destroy(hdmi->connector);
257 hdmi->connector = NULL;
243 } 258 }
244 259
245 return ERR_PTR(ret); 260 return ret;
246} 261}
247 262
248/* 263/*
@@ -251,13 +266,6 @@ fail:
251 266
252#include <linux/of_gpio.h> 267#include <linux/of_gpio.h>
253 268
254static void set_hdmi_pdev(struct drm_device *dev,
255 struct platform_device *pdev)
256{
257 struct msm_drm_private *priv = dev->dev_private;
258 priv->hdmi_pdev = pdev;
259}
260
261#ifdef CONFIG_OF 269#ifdef CONFIG_OF
262static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) 270static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
263{ 271{
@@ -278,7 +286,10 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
278 286
279static int hdmi_bind(struct device *dev, struct device *master, void *data) 287static int hdmi_bind(struct device *dev, struct device *master, void *data)
280{ 288{
289 struct drm_device *drm = dev_get_drvdata(master);
290 struct msm_drm_private *priv = drm->dev_private;
281 static struct hdmi_platform_config config = {}; 291 static struct hdmi_platform_config config = {};
292 struct hdmi *hdmi;
282#ifdef CONFIG_OF 293#ifdef CONFIG_OF
283 struct device_node *of_node = dev->of_node; 294 struct device_node *of_node = dev->of_node;
284 295
@@ -298,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
298 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); 309 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
299 config.pwr_clk_names = pwr_clk_names; 310 config.pwr_clk_names = pwr_clk_names;
300 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); 311 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
301 config.shared_irq = true;
302 } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { 312 } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
303 static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; 313 static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
304 static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; 314 static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -369,14 +379,22 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
369 } 379 }
370#endif 380#endif
371 dev->platform_data = &config; 381 dev->platform_data = &config;
372 set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev)); 382 hdmi = hdmi_init(to_platform_device(dev));
383 if (IS_ERR(hdmi))
384 return PTR_ERR(hdmi);
385 priv->hdmi = hdmi;
373 return 0; 386 return 0;
374} 387}
375 388
376static void hdmi_unbind(struct device *dev, struct device *master, 389static void hdmi_unbind(struct device *dev, struct device *master,
377 void *data) 390 void *data)
378{ 391{
379 set_hdmi_pdev(dev_get_drvdata(master), NULL); 392 struct drm_device *drm = dev_get_drvdata(master);
393 struct msm_drm_private *priv = drm->dev_private;
394 if (priv->hdmi) {
395 hdmi_destroy(priv->hdmi);
396 priv->hdmi = NULL;
397 }
380} 398}
381 399
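(The bind path now constructs the hdmi sub-device eagerly and stashes it in priv->hdmi; the master driver finishes KMS setup later. A sketch of the corresponding call site in the master driver's modeset init; the exact surrounding code is assumed, not part of this patch:)

    if (priv->hdmi) {
    	/* second phase: create bridge/connector, request the irq */
    	ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
    	if (ret)
    		goto fail;
    }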
382static const struct component_ops hdmi_ops = { 400static const struct component_ops hdmi_ops = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b981995410b5..43e654f751b7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -38,8 +38,6 @@ struct hdmi_audio {
38}; 38};
39 39
40struct hdmi { 40struct hdmi {
41 struct kref refcount;
42
43 struct drm_device *dev; 41 struct drm_device *dev;
44 struct platform_device *pdev; 42 struct platform_device *pdev;
45 43
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
97 /* gpio's: */ 95 /* gpio's: */
98 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; 96 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
99 int mux_lpm_gpio; 97 int mux_lpm_gpio;
100
101 /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
102 bool shared_irq;
103}; 98};
104 99
105void hdmi_set_mode(struct hdmi *hdmi, bool power_on); 100void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
106void hdmi_destroy(struct kref *kref);
107 101
108static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) 102static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
109{ 103{
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
115 return msm_readl(hdmi->mmio + reg); 109 return msm_readl(hdmi->mmio + reg);
116} 110}
117 111
118static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
119{
120 kref_get(&hdmi->refcount);
121 return hdmi;
122}
123
124static inline void hdmi_unreference(struct hdmi *hdmi)
125{
126 kref_put(&hdmi->refcount, hdmi_destroy);
127}
128
129/* 112/*
130 * The phy appears to be different, for example between 8960 and 8x60, 113 * The phy appears to be different, for example between 8960 and 8x60,
131 * so split the phy related functions out and load the correct one at 114 * so split the phy related functions out and load the correct one at
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 76fd0cfc6558..5b0844befbab 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f6cf745c249e..6902ad6da710 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -26,7 +26,6 @@ struct hdmi_bridge {
26static void hdmi_bridge_destroy(struct drm_bridge *bridge) 26static void hdmi_bridge_destroy(struct drm_bridge *bridge)
27{ 27{
28 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 28 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
29 hdmi_unreference(hdmi_bridge->hdmi);
30 drm_bridge_cleanup(bridge); 29 drm_bridge_cleanup(bridge);
31 kfree(hdmi_bridge); 30 kfree(hdmi_bridge);
32} 31}
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
218 goto fail; 217 goto fail;
219 } 218 }
220 219
221 hdmi_bridge->hdmi = hdmi_reference(hdmi); 220 hdmi_bridge->hdmi = hdmi;
222 221
223 bridge = &hdmi_bridge->base; 222 bridge = &hdmi_bridge->base;
224 223
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 4aca2a3c667c..fbebb0405d76 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
330 drm_connector_unregister(connector); 330 drm_connector_unregister(connector);
331 drm_connector_cleanup(connector); 331 drm_connector_cleanup(connector);
332 332
333 hdmi_unreference(hdmi_connector->hdmi);
334
335 kfree(hdmi_connector); 333 kfree(hdmi_connector);
336} 334}
337 335
@@ -401,6 +399,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
401 .detect = hdmi_connector_detect, 399 .detect = hdmi_connector_detect,
402 .fill_modes = drm_helper_probe_single_connector_modes, 400 .fill_modes = drm_helper_probe_single_connector_modes,
403 .destroy = hdmi_connector_destroy, 401 .destroy = hdmi_connector_destroy,
402 .reset = drm_atomic_helper_connector_reset,
403 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
404 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
404}; 405};
405 406
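(These three are the stock atomic-helper hooks; with no driver-private connector state, the default drm_connector_state handling is enough. A driver that did need private state would subclass it instead, e.g. this hypothetical:)

    struct my_connector_state {
    	struct drm_connector_state base;   /* must come first */
    	int my_field;                      /* driver-private data */
    };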
406static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 407static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -422,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
422 goto fail; 423 goto fail;
423 } 424 }
424 425
425 hdmi_connector->hdmi = hdmi_reference(hdmi); 426 hdmi_connector->hdmi = hdmi;
426 INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); 427 INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
427 428
428 connector = &hdmi_connector->base; 429 connector = &hdmi_connector->base;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index f408b69486a8..eeed006eed13 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
510 510
511#ifdef CONFIG_COMMON_CLK 511#ifdef CONFIG_COMMON_CLK
512 phy_8960->pll_hw.init = &pll_init; 512 phy_8960->pll_hw.init = &pll_init;
513 phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); 513 phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
514 if (IS_ERR(phy_8960->pll)) { 514 if (IS_ERR(phy_8960->pll)) {
515 ret = PTR_ERR(phy_8960->pll); 515 ret = PTR_ERR(phy_8960->pll);
516 phy_8960->pll = NULL; 516 phy_8960->pll = NULL;
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d53c29327df9..29bd796797de 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 03c0bd9cd5b9..a4a7f8c7122a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 7d00f7fb5773..a7672e100d8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -25,8 +25,6 @@
25struct mdp4_crtc { 25struct mdp4_crtc {
26 struct drm_crtc base; 26 struct drm_crtc base;
27 char name[8]; 27 char name[8];
28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
30 int id; 28 int id;
31 int ovlp; 29 int ovlp;
32 enum mdp4_dma dma; 30 enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
52 50
53 /* if there is a pending flip, these will be non-null: */ 51 /* if there is a pending flip, these will be non-null: */
54 struct drm_pending_vblank_event *event; 52 struct drm_pending_vblank_event *event;
55 struct msm_fence_cb pageflip_cb;
56 53
57#define PENDING_CURSOR 0x1 54#define PENDING_CURSOR 0x1
58#define PENDING_FLIP 0x2 55#define PENDING_FLIP 0x2
59 atomic_t pending; 56 atomic_t pending;
60 57
61 /* the fb that we logically (from PoV of KMS API) hold a ref
62 * to. Which we may not yet be scanning out (we may still
63 * be scanning out previous in case of page_flip while waiting
64 * for gpu rendering to complete:
65 */
66 struct drm_framebuffer *fb;
67
68 /* the fb that we currently hold a scanout ref to: */
69 struct drm_framebuffer *scanout_fb;
70
71 /* for unref'ing framebuffers after scanout completes: */
72 struct drm_flip_work unref_fb_work;
73
74 /* for unref'ing cursor bo's after scanout completes: */ 58 /* for unref'ing cursor bo's after scanout completes: */
75 struct drm_flip_work unref_cursor_work; 59 struct drm_flip_work unref_cursor_work;
76 60
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
97{ 81{
98 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 82 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
99 struct mdp4_kms *mdp4_kms = get_kms(crtc); 83 struct mdp4_kms *mdp4_kms = get_kms(crtc);
100 uint32_t i, flush = 0; 84 struct drm_plane *plane;
85 uint32_t flush = 0;
101 86
102 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { 87 drm_atomic_crtc_for_each_plane(plane, crtc) {
103 struct drm_plane *plane = mdp4_crtc->planes[i]; 88 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
104 if (plane) { 89 flush |= pipe2flush(pipe_id);
105 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
106 flush |= pipe2flush(pipe_id);
107 }
108 } 90 }
91
109 flush |= ovlp2flush(mdp4_crtc->ovlp); 92 flush |= ovlp2flush(mdp4_crtc->ovlp);
110 93
111 DBG("%s: flush=%08x", mdp4_crtc->name, flush); 94 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
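(crtc_flush() now derives the flush mask from whatever planes the atomic state currently attaches to this crtc, instead of a driver-maintained planes[] array. A sketch of the iterator's semantics, not the macro's literal body:)

    list_for_each_entry(plane, &crtc->dev->mode_config.plane_list, head)
    	if (plane->state && plane->state->crtc == crtc)
    		flush |= pipe2flush(mdp4_plane_pipe(plane));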
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
113 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); 96 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
114} 97}
115 98
116static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
117{
118 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
119 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
120
121 /* grab reference to incoming scanout fb: */
122 drm_framebuffer_reference(new_fb);
123 mdp4_crtc->base.primary->fb = new_fb;
124 mdp4_crtc->fb = new_fb;
125
126 if (old_fb)
127 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
128}
129
130/* unlike update_fb(), take a ref to the new scanout fb *before* updating
131 * plane, then call this. Needed to ensure we don't unref the buffer that
132 * is actually still being scanned out.
133 *
134 * Note that this whole thing goes away with atomic.. since we can defer
135 * calling into driver until rendering is done.
136 */
137static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
138{
139 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
140
141 /* flush updates, to make sure hw is updated to new scanout fb,
142 * so that we can safely queue unref to current fb (ie. next
143 * vblank we know hw is done w/ previous scanout_fb).
144 */
145 crtc_flush(crtc);
146
147 if (mdp4_crtc->scanout_fb)
148 drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
149 mdp4_crtc->scanout_fb);
150
151 mdp4_crtc->scanout_fb = fb;
152
153 /* enable vblank to complete flip: */
154 request_pending(crtc, PENDING_FLIP);
155}
156
157/* if file!=NULL, this is preclose potential cancel-flip path */ 99/* if file!=NULL, this is preclose potential cancel-flip path */
158static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) 100static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
159{ 101{
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
171 */ 113 */
172 if (!file || (event->base.file_priv == file)) { 114 if (!file || (event->base.file_priv == file)) {
173 mdp4_crtc->event = NULL; 115 mdp4_crtc->event = NULL;
116 DBG("%s: send event: %p", mdp4_crtc->name, event);
174 drm_send_vblank_event(dev, mdp4_crtc->id, event); 117 drm_send_vblank_event(dev, mdp4_crtc->id, event);
175 } 118 }
176 } 119 }
177 spin_unlock_irqrestore(&dev->event_lock, flags); 120 spin_unlock_irqrestore(&dev->event_lock, flags);
178} 121}
179 122
180static void pageflip_cb(struct msm_fence_cb *cb)
181{
182 struct mdp4_crtc *mdp4_crtc =
183 container_of(cb, struct mdp4_crtc, pageflip_cb);
184 struct drm_crtc *crtc = &mdp4_crtc->base;
185 struct drm_framebuffer *fb = crtc->primary->fb;
186
187 if (!fb)
188 return;
189
190 drm_framebuffer_reference(fb);
191 mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
192 update_scanout(crtc, fb);
193}
194
195static void unref_fb_worker(struct drm_flip_work *work, void *val)
196{
197 struct mdp4_crtc *mdp4_crtc =
198 container_of(work, struct mdp4_crtc, unref_fb_work);
199 struct drm_device *dev = mdp4_crtc->base.dev;
200
201 mutex_lock(&dev->mode_config.mutex);
202 drm_framebuffer_unreference(val);
203 mutex_unlock(&dev->mode_config.mutex);
204}
205
206static void unref_cursor_worker(struct drm_flip_work *work, void *val) 123static void unref_cursor_worker(struct drm_flip_work *work, void *val)
207{ 124{
208 struct mdp4_crtc *mdp4_crtc = 125 struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
218 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 135 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
219 136
220 drm_crtc_cleanup(crtc); 137 drm_crtc_cleanup(crtc);
221 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
222 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); 138 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
223 139
224 kfree(mdp4_crtc); 140 kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
251 return true; 167 return true;
252} 168}
253 169
254static void blend_setup(struct drm_crtc *crtc) 170/* statically (for now) map planes to mixer stage (z-order): */
171static const int idxs[] = {
172 [VG1] = 1,
173 [VG2] = 2,
174 [RGB1] = 0,
175 [RGB2] = 0,
176 [RGB3] = 0,
177 [VG3] = 3,
178 [VG4] = 4,
179
180};
181
182/* setup mixer config, for which we need to consider all crtc's and
183 * the planes attached to them
184 *
185 * TODO may possibly need some extra locking here
186 */
187static void setup_mixer(struct mdp4_kms *mdp4_kms)
255{ 188{
256 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 189 struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
257 struct mdp4_kms *mdp4_kms = get_kms(crtc); 190 struct drm_crtc *crtc;
258 int i, ovlp = mdp4_crtc->ovlp;
259 uint32_t mixer_cfg = 0; 191 uint32_t mixer_cfg = 0;
260 static const enum mdp_mixer_stage_id stages[] = { 192 static const enum mdp_mixer_stage_id stages[] = {
261 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, 193 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
262 }; 194 };
263 /* statically (for now) map planes to mixer stage (z-order): */
264 static const int idxs[] = {
265 [VG1] = 1,
266 [VG2] = 2,
267 [RGB1] = 0,
268 [RGB2] = 0,
269 [RGB3] = 0,
270 [VG3] = 3,
271 [VG4] = 4,
272 195
273 }; 196 list_for_each_entry(crtc, &config->crtc_list, head) {
274 bool alpha[4]= { false, false, false, false }; 197 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
198 struct drm_plane *plane;
275 199
276 /* Don't rely on value read back from hw, but instead use our 200 drm_atomic_crtc_for_each_plane(plane, crtc) {
277 * own shadowed value. Possibly disable/reenable looses the 201 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
278 * previous value and goes back to power-on default? 202 int idx = idxs[pipe_id];
279 */ 203 mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
280 mixer_cfg = mdp4_kms->mixer_cfg; 204 pipe_id, stages[idx]);
205 }
206 }
207
208 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
209}
210
211static void blend_setup(struct drm_crtc *crtc)
212{
213 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
214 struct mdp4_kms *mdp4_kms = get_kms(crtc);
215 struct drm_plane *plane;
216 int i, ovlp = mdp4_crtc->ovlp;
217 bool alpha[4]= { false, false, false, false };
281 218
282 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); 219 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
283 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); 220 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
284 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); 221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
285 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); 222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
286 223
287 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { 224 drm_atomic_crtc_for_each_plane(plane, crtc) {
288 struct drm_plane *plane = mdp4_crtc->planes[i]; 225 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
289 if (plane) { 226 int idx = idxs[pipe_id];
290 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); 227 if (idx > 0) {
291 int idx = idxs[pipe_id]; 228 const struct mdp_format *format =
292 if (idx > 0) {
293 const struct mdp_format *format =
294 to_mdp_format(msm_framebuffer_format(plane->fb)); 229 to_mdp_format(msm_framebuffer_format(plane->fb));
295 alpha[idx-1] = format->alpha_enable; 230 alpha[idx-1] = format->alpha_enable;
296 }
297 mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
298 pipe_id, stages[idx]);
299 } 231 }
300 } 232 }
301 233
302 /* this shouldn't happen.. and seems to cause underflow: */
303 WARN_ON(!mixer_cfg);
304
305 for (i = 0; i < 4; i++) { 234 for (i = 0; i < 4; i++) {
306 uint32_t op; 235 uint32_t op;
307 236
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
324 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); 253 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
325 } 254 }
326 255
327 mdp4_kms->mixer_cfg = mixer_cfg; 256 setup_mixer(mdp4_kms);
328 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
329} 257}
330 258
331static int mdp4_crtc_mode_set(struct drm_crtc *crtc, 259static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
332 struct drm_display_mode *mode,
333 struct drm_display_mode *adjusted_mode,
334 int x, int y,
335 struct drm_framebuffer *old_fb)
336{ 260{
337 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 261 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
338 struct mdp4_kms *mdp4_kms = get_kms(crtc); 262 struct mdp4_kms *mdp4_kms = get_kms(crtc);
339 enum mdp4_dma dma = mdp4_crtc->dma; 263 enum mdp4_dma dma = mdp4_crtc->dma;
340 int ret, ovlp = mdp4_crtc->ovlp; 264 int ovlp = mdp4_crtc->ovlp;
265 struct drm_display_mode *mode;
266
267 if (WARN_ON(!crtc->state))
268 return;
341 269
342 mode = adjusted_mode; 270 mode = &crtc->state->adjusted_mode;
343 271
344 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 272 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
345 mdp4_crtc->name, mode->base.id, mode->name, 273 mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
350 mode->vsync_end, mode->vtotal, 278 mode->vsync_end, mode->vtotal,
351 mode->type, mode->flags); 279 mode->type, mode->flags);
352 280
353 /* grab extra ref for update_scanout() */
354 drm_framebuffer_reference(crtc->primary->fb);
355
356 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
357 0, 0, mode->hdisplay, mode->vdisplay,
358 x << 16, y << 16,
359 mode->hdisplay << 16, mode->vdisplay << 16);
360 if (ret) {
361 drm_framebuffer_unreference(crtc->primary->fb);
362 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
363 mdp4_crtc->name, ret);
364 return ret;
365 }
366
367 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), 281 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
368 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | 282 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
369 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); 283 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
370 284
371 /* take data from pipe: */ 285 /* take data from pipe: */
372 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); 286 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
373 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 287 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
374 crtc->primary->fb->pitches[0]);
375 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), 288 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
376 MDP4_DMA_DST_SIZE_WIDTH(0) | 289 MDP4_DMA_DST_SIZE_WIDTH(0) |
377 MDP4_DMA_DST_SIZE_HEIGHT(0)); 290 MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
380 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), 293 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
381 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | 294 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
382 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); 295 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
383 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 296 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
384 crtc->primary->fb->pitches[0]);
385 297
386 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); 298 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
387 299
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
390 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); 302 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
391 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); 303 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
392 } 304 }
393
394 update_fb(crtc, crtc->primary->fb);
395 update_scanout(crtc, crtc->primary->fb);
396
397 return 0;
398} 305}
399 306
400static void mdp4_crtc_prepare(struct drm_crtc *crtc) 307static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
416 drm_crtc_vblank_put(crtc); 323 drm_crtc_vblank_put(crtc);
417} 324}
418 325
419static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 326static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
420 struct drm_framebuffer *old_fb) 327{
328}
329
330static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
331 struct drm_crtc_state *state)
421{ 332{
422 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
423 struct drm_plane *plane = mdp4_crtc->plane; 334 struct drm_device *dev = crtc->dev;
424 struct drm_display_mode *mode = &crtc->mode;
425 int ret;
426 335
427 /* grab extra ref for update_scanout() */ 336 DBG("%s: check", mdp4_crtc->name);
428 drm_framebuffer_reference(crtc->primary->fb);
429 337
430 ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb, 338 if (mdp4_crtc->event) {
431 0, 0, mode->hdisplay, mode->vdisplay, 339 dev_err(dev->dev, "already pending flip!\n");
432 x << 16, y << 16, 340 return -EBUSY;
433 mode->hdisplay << 16, mode->vdisplay << 16);
434 if (ret) {
435 drm_framebuffer_unreference(crtc->primary->fb);
436 return ret;
437 } 341 }
438 342
439 update_fb(crtc, crtc->primary->fb); 343 // TODO anything else to check?
440 update_scanout(crtc, crtc->primary->fb);
441 344
442 return 0; 345 return 0;
443} 346}
444 347
445static void mdp4_crtc_load_lut(struct drm_crtc *crtc) 348static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
446{ 349{
350 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
351 DBG("%s: begin", mdp4_crtc->name);
447} 352}
448 353
449static int mdp4_crtc_page_flip(struct drm_crtc *crtc, 354static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
450 struct drm_framebuffer *new_fb,
451 struct drm_pending_vblank_event *event,
452 uint32_t page_flip_flags)
453{ 355{
454 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
455 struct drm_device *dev = crtc->dev; 357 struct drm_device *dev = crtc->dev;
456 struct drm_gem_object *obj;
457 unsigned long flags; 358 unsigned long flags;
458 359
459 if (mdp4_crtc->event) { 360 DBG("%s: flush", mdp4_crtc->name);
460 dev_err(dev->dev, "already pending flip!\n");
461 return -EBUSY;
462 }
463 361
464 obj = msm_framebuffer_bo(new_fb, 0); 362 WARN_ON(mdp4_crtc->event);
465 363
466 spin_lock_irqsave(&dev->event_lock, flags); 364 spin_lock_irqsave(&dev->event_lock, flags);
467 mdp4_crtc->event = event; 365 mdp4_crtc->event = crtc->state->event;
468 spin_unlock_irqrestore(&dev->event_lock, flags); 366 spin_unlock_irqrestore(&dev->event_lock, flags);
469 367
470 update_fb(crtc, new_fb); 368 blend_setup(crtc);
471 369 crtc_flush(crtc);
472 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); 370 request_pending(crtc, PENDING_FLIP);
473} 371}
474 372
475static int mdp4_crtc_set_property(struct drm_crtc *crtc, 373static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
607} 505}
608 506
609static const struct drm_crtc_funcs mdp4_crtc_funcs = { 507static const struct drm_crtc_funcs mdp4_crtc_funcs = {
610 .set_config = drm_crtc_helper_set_config, 508 .set_config = drm_atomic_helper_set_config,
611 .destroy = mdp4_crtc_destroy, 509 .destroy = mdp4_crtc_destroy,
612 .page_flip = mdp4_crtc_page_flip, 510 .page_flip = drm_atomic_helper_page_flip,
613 .set_property = mdp4_crtc_set_property, 511 .set_property = mdp4_crtc_set_property,
614 .cursor_set = mdp4_crtc_cursor_set, 512 .cursor_set = mdp4_crtc_cursor_set,
615 .cursor_move = mdp4_crtc_cursor_move, 513 .cursor_move = mdp4_crtc_cursor_move,
514 .reset = drm_atomic_helper_crtc_reset,
515 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
516 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
616}; 517};
617 518
618static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { 519static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
619 .dpms = mdp4_crtc_dpms, 520 .dpms = mdp4_crtc_dpms,
620 .mode_fixup = mdp4_crtc_mode_fixup, 521 .mode_fixup = mdp4_crtc_mode_fixup,
621 .mode_set = mdp4_crtc_mode_set, 522 .mode_set_nofb = mdp4_crtc_mode_set_nofb,
523 .mode_set = drm_helper_crtc_mode_set,
524 .mode_set_base = drm_helper_crtc_mode_set_base,
622 .prepare = mdp4_crtc_prepare, 525 .prepare = mdp4_crtc_prepare,
623 .commit = mdp4_crtc_commit, 526 .commit = mdp4_crtc_commit,
624 .mode_set_base = mdp4_crtc_mode_set_base,
625 .load_lut = mdp4_crtc_load_lut, 527 .load_lut = mdp4_crtc_load_lut,
528 .atomic_check = mdp4_crtc_atomic_check,
529 .atomic_begin = mdp4_crtc_atomic_begin,
530 .atomic_flush = mdp4_crtc_atomic_flush,
626}; 531};
627 532
628static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) 533static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
638 543
639 if (pending & PENDING_FLIP) { 544 if (pending & PENDING_FLIP) {
640 complete_flip(crtc, NULL); 545 complete_flip(crtc, NULL);
641 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
642 } 546 }
643 547
644 if (pending & PENDING_CURSOR) { 548 if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
663 567
664void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) 568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
665{ 569{
666 DBG("cancel: %p", file); 570 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
571 DBG("%s: cancel: %p", mdp4_crtc->name, file);
667 complete_flip(crtc, file); 572 complete_flip(crtc, file);
668} 573}
669 574
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
717 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); 622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
718} 623}
719 624
720static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
721 struct drm_plane *plane)
722{
723 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
724
725 BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
726
727 if (mdp4_crtc->planes[pipe_id] == plane)
728 return;
729
730 mdp4_crtc->planes[pipe_id] = plane;
731 blend_setup(crtc);
732 if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
733 crtc_flush(crtc);
734}
735
736void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
737{
738 set_attach(crtc, mdp4_plane_pipe(plane), plane);
739}
740
741void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
742{
743 /* don't actually detatch our primary plane: */
744 if (to_mdp4_crtc(crtc)->plane == plane)
745 return;
746 set_attach(crtc, mdp4_plane_pipe(plane), NULL);
747}
748
749static const char *dma_names[] = { 625static const char *dma_names[] = {
750 "DMA_P", "DMA_S", "DMA_E", 626 "DMA_P", "DMA_S", "DMA_E",
751}; 627};
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
757{ 633{
758 struct drm_crtc *crtc = NULL; 634 struct drm_crtc *crtc = NULL;
759 struct mdp4_crtc *mdp4_crtc; 635 struct mdp4_crtc *mdp4_crtc;
760 int ret;
761 636
762 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); 637 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
763 if (!mdp4_crtc) { 638 if (!mdp4_crtc)
764 ret = -ENOMEM; 639 return ERR_PTR(-ENOMEM);
765 goto fail;
766 }
767 640
768 crtc = &mdp4_crtc->base; 641 crtc = &mdp4_crtc->base;
769 642
770 mdp4_crtc->plane = plane;
771 mdp4_crtc->id = id; 643 mdp4_crtc->id = id;
772 644
773 mdp4_crtc->ovlp = ovlp_id; 645 mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
784 656
785 spin_lock_init(&mdp4_crtc->cursor.lock); 657 spin_lock_init(&mdp4_crtc->cursor.lock);
786 658
787 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16, 659 drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
788 "unref fb", unref_fb_worker);
789 if (ret)
790 goto fail;
791
792 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
793 "unref cursor", unref_cursor_worker); 660 "unref cursor", unref_cursor_worker);
794 661
795 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
796
797 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs); 662 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
798 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 663 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
664 plane->crtc = crtc;
799 665
800 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base); 666 mdp4_plane_install_properties(plane, &crtc->base);
801 667
802 return crtc; 668 return crtc;
803
804fail:
805 if (crtc)
806 mdp4_crtc_destroy(crtc);
807
808 return ERR_PTR(ret);
809} 669}
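The conversion above drops the driver-private planes[] array: with atomic modesetting, "which planes belong to this CRTC" is derived from each plane's committed state. A minimal sketch of what drm_atomic_crtc_for_each_plane() amounts to (example_accumulate_flush() is a hypothetical name, not part of the patch):

#include <drm/drm_crtc.h>

/* Hypothetical illustration: derive the set of planes on a CRTC from
 * atomic plane state instead of a driver-private planes[] array.
 */
static uint32_t example_accumulate_flush(struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	uint32_t flush = 0;

	/* walk every plane known to the device... */
	list_for_each_entry(plane, &crtc->dev->mode_config.plane_list, head) {
		/* ...and keep only those whose state binds them to this CRTC */
		if (!plane->state || plane->state->crtc != crtc)
			continue;
		flush |= pipe2flush(mdp4_plane_pipe(plane));
	}

	return flush;
}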
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 79d804e61cc4..a62109e4ae0d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_panel *panel;
-	struct hdmi *hdmi;
 	int ret;
 
 	/* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	priv->crtcs[priv->num_crtcs++] = crtc;
 	priv->encoders[priv->num_encoders++] = encoder;
 
-	hdmi = hdmi_init(dev, encoder);
-	if (IS_ERR(hdmi)) {
-		ret = PTR_ERR(hdmi);
-		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
-		goto fail;
+	if (priv->hdmi) {
+		/* Construct bridge/connector for HDMI: */
+		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+		if (ret) {
+			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+			goto fail;
+		}
 	}
 
 	return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
 		mdp4_kms->dsi_pll_vddio = NULL;
 
+	/* NOTE: driver for this regulator still missing upstream.. use
+	 * _get_exclusive() and ignore the error if it does not exist
+	 * (and hope that the bootloader left it on for us)
+	 */
 	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
 	if (IS_ERR(mdp4_kms->vdd))
 		mdp4_kms->vdd = NULL;
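The vdd handling above follows the "optional supply" idiom the new comment describes: the lookup error is swallowed and the pointer NULLed rather than failing the probe. A minimal sketch of the pattern (example_get_optional_vdd() is a hypothetical helper, not part of the patch):

#include <linux/regulator/consumer.h>

/* Hypothetical sketch: treat a missing "vdd" regulator driver as
 * "bootloader left it on", instead of failing the probe.
 */
static struct regulator *example_get_optional_vdd(struct device *dev)
{
	struct regulator *vdd = devm_regulator_get_exclusive(dev, "vdd");

	if (IS_ERR(vdd))
		return NULL;	/* callers then simply skip regulator_enable() */

	return vdd;
}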
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 9ff6e7ccfe90..cbd77bc626d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
 
 	int rev;
 
-	/* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
-	 * crtcs/encoders is in one shared register, we need to update it
-	 * via read/modify/write.  But to avoid getting confused by power-
-	 * on-default values after resume, use this shadow value instead:
-	 */
-	uint32_t mixer_cfg;
-
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
 
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
 
 void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
-		struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
-		struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		int crtc_x, int crtc_y,
-		unsigned int crtc_w, unsigned int crtc_h,
-		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id, int ovlp_id,
 		enum mdp4_dma dma_id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 310034688c15..4ddc28e1275b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 	.detect = mdp4_lvds_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = mdp4_lvds_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
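These three callbacks are the stock wiring that lets the atomic core manage per-connector state even for an otherwise legacy connector: reset creates the initial state, and duplicate/destroy let the core build and discard candidate configurations. Conceptually, for a connector with no subclassed state, duplication boils down to a copy (a simplified sketch, not the helper's actual upstream body):

/* Simplified sketch of what the duplicate-state helper amounts to: */
static struct drm_connector_state *
example_duplicate_state(struct drm_connector *connector)
{
	/* the copy belongs to the atomic core, which releases it via
	 * the matching ->atomic_destroy_state() callback
	 */
	return kmemdup(connector->state, sizeof(*connector->state),
			GFP_KERNEL);
}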
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 66f33dba1ebb..1e5ebe83647d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -31,47 +31,26 @@ struct mdp4_plane {
 };
 #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
 
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
-	struct msm_drm_private *priv = plane->dev->dev_private;
-	return to_mdp4_kms(to_mdp_kms(priv->kms));
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h)
-{
-	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
-	mdp4_plane->enabled = true;
-
-	if (plane->fb)
-		drm_framebuffer_unreference(plane->fb);
-
-	drm_framebuffer_reference(fb);
-
-	return mdp4_plane_mode_set(plane, crtc, fb,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x, src_y, src_w, src_h);
-}
+		uint32_t src_w, uint32_t src_h);
 
-static int mdp4_plane_disable(struct drm_plane *plane)
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
-	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-	DBG("%s: disable", mdp4_plane->name);
-	if (plane->crtc)
-		mdp4_crtc_detach(plane->crtc, plane);
-	return 0;
+	struct msm_drm_private *priv = plane->dev->dev_private;
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static void mdp4_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 
-	mdp4_plane_disable(plane);
+	drm_plane_helper_disable(plane);
 	drm_plane_cleanup(plane);
 
 	kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs mdp4_plane_funcs = {
-	.update_plane = mdp4_plane_update,
-	.disable_plane = mdp4_plane_disable,
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = mdp4_plane_destroy,
 	.set_property = mdp4_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
-void mdp4_plane_set_scanout(struct drm_plane *plane,
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
+	return msm_framebuffer_prepare(fb, mdp4_kms->id);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+	DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+	msm_framebuffer_cleanup(fb, mdp4_kms->id);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state = plane->state;
+	int ret;
+
+	ret = mdp4_plane_mode_set(plane,
+			state->crtc, state->fb,
+			state->crtc_x, state->crtc_y,
+			state->crtc_w, state->crtc_h,
+			state->src_x, state->src_y,
+			state->src_w, state->src_h);
+	/* atomic_check should have ensured that this doesn't fail */
+	WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+	.prepare_fb = mdp4_plane_prepare_fb,
+	.cleanup_fb = mdp4_plane_cleanup_fb,
+	.atomic_check = mdp4_plane_atomic_check,
+	.atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
 		struct drm_framebuffer *fb)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
 	enum mdp4_pipe pipe = mdp4_plane->pipe;
-	uint32_t iova;
+	uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
+
+	DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
+			iova, fb->pitches[0]);
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
 			MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
 			MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
 			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
-	msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
 
 	plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
 
 #define MDP4_VG_PHASE_STEP_DEFAULT	0x20000000
 
-int mdp4_plane_mode_set(struct drm_plane *plane,
+static int mdp4_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
 	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
 
+	if (!(crtc && fb)) {
+		DBG("%s: disabled!", mdp4_plane->name);
+		return 0;
+	}
+
 	/* src values are in Q16 fixed point, convert to integer: */
 	src_x = src_x >> 16;
 	src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
 
-	/* TODO detach from old crtc (if we had more than one) */
-	mdp4_crtc_attach(crtc, plane);
-
 	return 0;
 }
 
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 			ARRAY_SIZE(mdp4_plane->formats));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-	drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
-			mdp4_plane->formats, mdp4_plane->nformats,
-			type);
+	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+			mdp4_plane->formats, mdp4_plane->nformats, type);
+	if (ret)
+		goto fail;
+
+	drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
 
 	mdp4_plane_install_properties(plane, &plane->base);
 
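With the plane converted, buffer pinning moves out of the update path: the atomic helpers call prepare_fb() (the only step allowed to fail) before any register is touched, atomic_update() once failure is no longer an option, and cleanup_fb() on the old buffer afterwards. A condensed sketch of that ordering (example_commit_plane() is hypothetical; the real sequencing, including the vblank wait, lives in the drm_atomic_helper commit code):

/* Hypothetical condensed view of a helper-driven plane commit: */
static int example_commit_plane(struct drm_plane *plane,
		struct drm_plane_state *old_state)
{
	int ret;

	/* pin the new fb up front; only this step may fail */
	ret = mdp4_plane_prepare_fb(plane, plane->state->fb);
	if (ret)
		return ret;

	/* program the hardware from plane->state; must not fail now */
	mdp4_plane_atomic_update(plane, old_state);

	/* unpin the old fb once scanout has moved off of it */
	if (old_state->fb)
		mdp4_plane_cleanup_fb(plane, old_state->fb);

	return 0;
}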
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 67f4f896ba8c..e87ef5512cb0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml               (   647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (  1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml          ( 17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml    (  1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml          ( 22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml          ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml    (  1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml          ( 23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml           ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml          (   344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml       (  1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml       (  1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml       (   600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml         ( 23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml         ( 23613 bytes, from 2014-07-17 15:33:30)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000000..b0a44310cf2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+	int revision;
+	struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+	.name = "msm8x74",
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.ad = {
+		.count = 2,
+		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+	},
+	.intf = {
+		.count = 4,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+	.name = "apq8084",
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.reserved_state[0] = GENMASK(7, 0),	/* first 8 MMBs */
+		.reserved[CID_RGB0] = 2,
+		.reserved[CID_RGB1] = 2,
+		.reserved[CID_RGB2] = 2,
+		.reserved[CID_RGB3] = 2,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x03200, 0x03600 },
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x13500, 0x13700, 0x13900 },
+	},
+	.intf = {
+		.count = 5,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+	},
+	.max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+	{ .revision = 0, .config = { .hw = &msm8x74_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74_config } },
+	{ .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+	kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor)
+{
+	struct drm_device *dev = mdp5_kms->dev;
+	struct platform_device *pdev = dev->platformdev;
+	struct mdp5_cfg_handler *cfg_handler;
+	struct mdp5_cfg_platform *pconfig;
+	int i, ret = 0;
+
+	cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+	if (unlikely(!cfg_handler)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (major != 1) {
+		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	/* only after mdp5_cfg global pointer's init can we access the hw */
+	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+		if (cfg_handlers[i].revision != minor)
+			continue;
+		mdp5_cfg = cfg_handlers[i].config.hw;
+
+		break;
+	}
+	if (unlikely(!mdp5_cfg)) {
+		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	cfg_handler->revision = minor;
+	cfg_handler->config.hw = mdp5_cfg;
+
+	pconfig = mdp5_get_config(pdev);
+	memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+	DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+	return cfg_handler;
+
+fail:
+	if (cfg_handler)
+		mdp5_cfg_destroy(cfg_handler);
+
+	return NULL;
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+	static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+	/* TODO */
+#endif
+	config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+	return &config;
+}
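Callers reach these tables through the handler returned by mdp5_cfg_init(). A hedged usage sketch (example_setup_cfg() is hypothetical; the real caller lives in mdp5_kms.c, where major/minor come from the probed hardware revision):

/* Hypothetical caller: pick the config table for the probed MDP5 rev. */
static int example_setup_cfg(struct mdp5_kms *mdp5_kms,
		uint32_t major, uint32_t minor)
{
	const struct mdp5_cfg_hw *hw_cfg;

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (!mdp5_kms->cfg)
		return -ENXIO;		/* unknown v<major>.<minor> */

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	DBG("%s: %d CTLs, max core clock %u Hz", hw_cfg->name,
			hw_cfg->ctl.count, hw_cfg->max_clk);

	return 0;
}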
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000000..dba4d52cceeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL			8
+#define MAX_BASES		8
+#define MAX_SMP_BLOCKS		44
+#define MAX_CLIENTS		32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+	int count; \
+	uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t nb_stages;		/* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+	int mmb_count;			/* number of SMP MMBs */
+	int mmb_size;			/* MMB: size in bytes */
+	mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+	int reserved[MAX_CLIENTS];	/* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+	char *name;
+
+	struct mdp5_smp_block smp;
+	struct mdp5_sub_block ctl;
+	struct mdp5_sub_block pipe_vig;
+	struct mdp5_sub_block pipe_rgb;
+	struct mdp5_sub_block pipe_dma;
+	struct mdp5_lm_block lm;
+	struct mdp5_sub_block dspp;
+	struct mdp5_sub_block ad;
+	struct mdp5_sub_block intf;
+
+	uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+	struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+	const struct mdp5_cfg_hw *hw;
+	struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
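mdp5_smp_state_t is a fixed-size bitmap over the 44 possible MMBs, so static reservations such as apq8084's GENMASK(7, 0) compose with the standard linux/bitmap.h operations. A sketch of a contiguous-allocation helper over that type (example_alloc_mmbs() is hypothetical, not part of the patch):

#include <linux/bitmap.h>

/* Hypothetical helper: grab nblks contiguous MMBs from an SMP state. */
static int example_alloc_mmbs(mdp5_smp_state_t state, int nblks)
{
	unsigned long blk;

	blk = bitmap_find_next_zero_area(state, MAX_SMP_BLOCKS, 0, nblks, 0);
	if (blk >= MAX_SMP_BLOCKS)
		return -ENOSPC;		/* not enough free MMBs */
	bitmap_set(state, blk, nblks);

	return blk;			/* first MMB of the allocation */
}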
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index ebe2e60f3ab1..0e9a2e3a82d7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -17,43 +18,35 @@
17 18
18#include "mdp5_kms.h" 19#include "mdp5_kms.h"
19 20
21#include <linux/sort.h>
20#include <drm/drm_mode.h> 22#include <drm/drm_mode.h>
21#include "drm_crtc.h" 23#include "drm_crtc.h"
22#include "drm_crtc_helper.h" 24#include "drm_crtc_helper.h"
23#include "drm_flip_work.h" 25#include "drm_flip_work.h"
24 26
27#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
28
25struct mdp5_crtc { 29struct mdp5_crtc {
26 struct drm_crtc base; 30 struct drm_crtc base;
27 char name[8]; 31 char name[8];
28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
30 int id; 32 int id;
31 bool enabled; 33 bool enabled;
32 34
33 /* which mixer/encoder we route output to: */ 35 /* layer mixer used for this CRTC (+ its lock): */
34 int mixer; 36#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
37 int lm;
38 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
39
40 /* CTL used for this CRTC: */
41 struct mdp5_ctl *ctl;
35 42
36 /* if there is a pending flip, these will be non-null: */ 43 /* if there is a pending flip, these will be non-null: */
37 struct drm_pending_vblank_event *event; 44 struct drm_pending_vblank_event *event;
38 struct msm_fence_cb pageflip_cb;
39 45
40#define PENDING_CURSOR 0x1 46#define PENDING_CURSOR 0x1
41#define PENDING_FLIP 0x2 47#define PENDING_FLIP 0x2
42 atomic_t pending; 48 atomic_t pending;
43 49
44 /* the fb that we logically (from PoV of KMS API) hold a ref
45 * to. Which we may not yet be scanning out (we may still
46 * be scanning out previous in case of page_flip while waiting
47 * for gpu rendering to complete:
48 */
49 struct drm_framebuffer *fb;
50
51 /* the fb that we currently hold a scanout ref to: */
52 struct drm_framebuffer *scanout_fb;
53
54 /* for unref'ing framebuffers after scanout completes: */
55 struct drm_flip_work unref_fb_work;
56
57 struct mdp_irq vblank; 50 struct mdp_irq vblank;
58 struct mdp_irq err; 51 struct mdp_irq err;
59}; 52};
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
73 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); 66 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
74} 67}
75 68
76static void crtc_flush(struct drm_crtc *crtc) 69#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
77{
78 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
79 struct mdp5_kms *mdp5_kms = get_kms(crtc);
80 int id = mdp5_crtc->id;
81 uint32_t i, flush = 0;
82
83 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
84 struct drm_plane *plane = mdp5_crtc->planes[i];
85 if (plane) {
86 enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
87 flush |= pipe2flush(pipe);
88 }
89 }
90 flush |= mixer2flush(mdp5_crtc->id);
91 flush |= MDP5_CTL_FLUSH_CTL;
92
93 DBG("%s: flush=%08x", mdp5_crtc->name, flush);
94
95 mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
96}
97 70
98static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) 71static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
99{ 72{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 73 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
101 struct drm_framebuffer *old_fb = mdp5_crtc->fb;
102
103 /* grab reference to incoming scanout fb: */
104 drm_framebuffer_reference(new_fb);
105 mdp5_crtc->base.primary->fb = new_fb;
106 mdp5_crtc->fb = new_fb;
107 74
108 if (old_fb) 75 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
109 drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); 76 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
110} 77}
111 78
112/* unlike update_fb(), take a ref to the new scanout fb *before* updating 79/*
113 * plane, then call this. Needed to ensure we don't unref the buffer that 80 * flush updates, to make sure hw is updated to new scanout fb,
114 * is actually still being scanned out. 81 * so that we can safely queue unref to current fb (ie. next
115 * 82 * vblank we know hw is done w/ previous scanout_fb).
116 * Note that this whole thing goes away with atomic.. since we can defer
117 * calling into driver until rendering is done.
118 */ 83 */
119static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) 84static void crtc_flush_all(struct drm_crtc *crtc)
120{ 85{
121 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 86 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
87 struct drm_plane *plane;
88 uint32_t flush_mask = 0;
122 89
123 /* flush updates, to make sure hw is updated to new scanout fb, 90 /* we could have already released CTL in the disable path: */
124 * so that we can safely queue unref to current fb (ie. next 91 if (!mdp5_crtc->ctl)
125 * vblank we know hw is done w/ previous scanout_fb). 92 return;
126 */
127 crtc_flush(crtc);
128
129 if (mdp5_crtc->scanout_fb)
130 drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
131 mdp5_crtc->scanout_fb);
132 93
133 mdp5_crtc->scanout_fb = fb; 94 drm_atomic_crtc_for_each_plane(plane, crtc) {
95 flush_mask |= mdp5_plane_get_flush(plane);
96 }
97 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
98 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
134 99
135 /* enable vblank to complete flip: */ 100 crtc_flush(crtc, flush_mask);
136 request_pending(crtc, PENDING_FLIP);
137} 101}
138 102
139/* if file!=NULL, this is preclose potential cancel-flip path */ 103/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
142 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 106 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
143 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
144 struct drm_pending_vblank_event *event; 108 struct drm_pending_vblank_event *event;
145 unsigned long flags, i; 109 struct drm_plane *plane;
110 unsigned long flags;
146 111
147 spin_lock_irqsave(&dev->event_lock, flags); 112 spin_lock_irqsave(&dev->event_lock, flags);
148 event = mdp5_crtc->event; 113 event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
153 */ 118 */
154 if (!file || (event->base.file_priv == file)) { 119 if (!file || (event->base.file_priv == file)) {
155 mdp5_crtc->event = NULL; 120 mdp5_crtc->event = NULL;
121 DBG("%s: send event: %p", mdp5_crtc->name, event);
156 drm_send_vblank_event(dev, mdp5_crtc->id, event); 122 drm_send_vblank_event(dev, mdp5_crtc->id, event);
157 } 123 }
158 } 124 }
159 spin_unlock_irqrestore(&dev->event_lock, flags); 125 spin_unlock_irqrestore(&dev->event_lock, flags);
160 126
161 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { 127 drm_atomic_crtc_for_each_plane(plane, crtc) {
162 struct drm_plane *plane = mdp5_crtc->planes[i]; 128 mdp5_plane_complete_flip(plane);
163 if (plane)
164 mdp5_plane_complete_flip(plane);
165 } 129 }
166} 130}
167 131
168static void pageflip_cb(struct msm_fence_cb *cb)
169{
170 struct mdp5_crtc *mdp5_crtc =
171 container_of(cb, struct mdp5_crtc, pageflip_cb);
172 struct drm_crtc *crtc = &mdp5_crtc->base;
173 struct drm_framebuffer *fb = mdp5_crtc->fb;
174
175 if (!fb)
176 return;
177
178 drm_framebuffer_reference(fb);
179 mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
180 update_scanout(crtc, fb);
181}
182
183static void unref_fb_worker(struct drm_flip_work *work, void *val)
184{
185 struct mdp5_crtc *mdp5_crtc =
186 container_of(work, struct mdp5_crtc, unref_fb_work);
187 struct drm_device *dev = mdp5_crtc->base.dev;
188
189 mutex_lock(&dev->mode_config.mutex);
190 drm_framebuffer_unreference(val);
191 mutex_unlock(&dev->mode_config.mutex);
192}
193
194static void mdp5_crtc_destroy(struct drm_crtc *crtc) 132static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195{ 133{
196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 134 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197 135
198 drm_crtc_cleanup(crtc); 136 drm_crtc_cleanup(crtc);
199 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
200 137
201 kfree(mdp5_crtc); 138 kfree(mdp5_crtc);
202} 139}
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
214 mdp5_enable(mdp5_kms); 151 mdp5_enable(mdp5_kms);
215 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 152 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
216 } else { 153 } else {
154 /* set STAGE_UNUSED for all layers */
155 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
217 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); 156 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
218 mdp5_disable(mdp5_kms); 157 mdp5_disable(mdp5_kms);
219 } 158 }
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
228 return true; 167 return true;
229} 168}
230 169
170/*
171 * blend_setup() - blend all the planes of a CRTC
172 *
173 * When border is enabled, the border color will ALWAYS be the base layer.
174 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
175 * If disabled, the first plane starts at STAGE_BASE.
176 *
177 * Note:
178 * Border is not enabled here because the private plane is exactly
179 * the CRTC resolution.
180 */
231static void blend_setup(struct drm_crtc *crtc) 181static void blend_setup(struct drm_crtc *crtc)
232{ 182{
233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 183 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
234 struct mdp5_kms *mdp5_kms = get_kms(crtc); 184 struct mdp5_kms *mdp5_kms = get_kms(crtc);
235 int id = mdp5_crtc->id; 185 struct drm_plane *plane;
186 const struct mdp5_cfg_hw *hw_cfg;
187 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
188 unsigned long flags;
189#define blender(stage) ((stage) - STAGE_BASE)
236 190
237 /* 191 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
238 * Hard-coded setup for now until I figure out how the
239 * layer-mixer works
240 */
241 192
242 /* LM[id]: */ 193 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
243 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id), 194
244 MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA); 195 /* ctl could be released already when we are shutting down: */
245 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0), 196 if (!mdp5_crtc->ctl)
246 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | 197 goto out;
247 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
248 MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
249 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
250 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
251
252 /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
253 * we want to be setting CTL[m].LAYER[n]. Not sure what the
254 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
255 * used when chaining up mixers for high resolution displays?
256 */
257 198
258 /* CTL[id]: */ 199 drm_atomic_crtc_for_each_plane(plane, crtc) {
259 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0), 200 enum mdp_mixer_stage_id stage =
260 MDP5_CTL_LAYER_REG_RGB0(STAGE0) | 201 to_mdp5_plane_state(plane->state)->stage;
261 MDP5_CTL_LAYER_REG_BORDER_COLOR); 202
262 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0); 203 /*
263 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0); 204 * Note: This cannot happen with current implementation but
264 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0); 205 * we need to check this condition once z property is added
265 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0); 206 */
207 BUG_ON(stage > hw_cfg->lm.nb_stages);
208
209 /* LM */
210 mdp5_write(mdp5_kms,
211 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
212 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
213 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
214 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
215 blender(stage)), 0xff);
216 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
217 blender(stage)), 0x00);
218 /* CTL */
219 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
220 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
221 pipe2name(mdp5_plane_pipe(plane)), stage);
222 }
223
224 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
225 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
226
227out:
228 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
266} 229}
267 230
268static int mdp5_crtc_mode_set(struct drm_crtc *crtc, 231static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
269 struct drm_display_mode *mode,
270 struct drm_display_mode *adjusted_mode,
271 int x, int y,
272 struct drm_framebuffer *old_fb)
273{ 232{
274 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
275 struct mdp5_kms *mdp5_kms = get_kms(crtc); 234 struct mdp5_kms *mdp5_kms = get_kms(crtc);
276 int ret; 235 unsigned long flags;
236 struct drm_display_mode *mode;
277 237
278 mode = adjusted_mode; 238 if (WARN_ON(!crtc->state))
239 return;
240
241 mode = &crtc->state->adjusted_mode;
279 242
280 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 243 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
281 mdp5_crtc->name, mode->base.id, mode->name, 244 mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
286 mode->vsync_end, mode->vtotal, 249 mode->vsync_end, mode->vtotal,
287 mode->type, mode->flags); 250 mode->type, mode->flags);
288 251
289 /* grab extra ref for update_scanout() */ 252 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
290 drm_framebuffer_reference(crtc->primary->fb); 253 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
291
292 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
293 0, 0, mode->hdisplay, mode->vdisplay,
294 x << 16, y << 16,
295 mode->hdisplay << 16, mode->vdisplay << 16);
296 if (ret) {
297 drm_framebuffer_unreference(crtc->primary->fb);
298 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
299 mdp5_crtc->name, ret);
300 return ret;
301 }
302
303 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
304 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | 254 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
305 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); 255 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
306 256 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
307 update_fb(crtc, crtc->primary->fb);
308 update_scanout(crtc, crtc->primary->fb);
309
310 return 0;
311} 257}
312 258
313static void mdp5_crtc_prepare(struct drm_crtc *crtc) 259static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
321 267
322static void mdp5_crtc_commit(struct drm_crtc *crtc) 268static void mdp5_crtc_commit(struct drm_crtc *crtc)
323{ 269{
270 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
271 DBG("%s", mdp5_crtc->name);
324 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 272 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
325 crtc_flush(crtc); 273 crtc_flush_all(crtc);
326 /* drop the ref to mdp clk's that we got in prepare: */ 274 /* drop the ref to mdp clk's that we got in prepare: */
327 mdp5_disable(get_kms(crtc)); 275 mdp5_disable(get_kms(crtc));
328} 276}
329 277
330static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 278static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
331 struct drm_framebuffer *old_fb) 279{
280}
281
282struct plane_state {
283 struct drm_plane *plane;
284 struct mdp5_plane_state *state;
285};
286
287static int pstate_cmp(const void *a, const void *b)
288{
289 struct plane_state *pa = (struct plane_state *)a;
290 struct plane_state *pb = (struct plane_state *)b;
291 return pa->state->zpos - pb->state->zpos;
292}
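
pstate_cmp() follows the comparator contract of the kernel's sort() from <linux/sort.h>: negative, zero, or positive for less-than, equal, and greater-than. Note that lib/sort.c is a heapsort, so the ordering of planes with equal zpos is unspecified. A standalone sketch of the call:

#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
        return *(const int *)a - *(const int *)b;
}

static void example_sort(void)
{
        int zpos[] = { 2, 0, 1 };

        /* base, element count, element size, comparator, optional
         * swap callback (NULL selects the generic byte-wise swap): */
        sort(zpos, ARRAY_SIZE(zpos), sizeof(zpos[0]), cmp_int, NULL);
}
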
293
294static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
295 struct drm_crtc_state *state)
332{ 296{
333 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 297 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
334 struct drm_plane *plane = mdp5_crtc->plane; 298 struct mdp5_kms *mdp5_kms = get_kms(crtc);
335 struct drm_display_mode *mode = &crtc->mode; 299 struct drm_plane *plane;
336 int ret; 300 struct drm_device *dev = crtc->dev;
337 301 struct plane_state pstates[STAGE3 + 1];
338 /* grab extra ref for update_scanout() */ 302 int cnt = 0, i;
339 drm_framebuffer_reference(crtc->primary->fb); 303
340 304 DBG("%s: check", mdp5_crtc->name);
341 ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb, 305
342 0, 0, mode->hdisplay, mode->vdisplay, 306 if (mdp5_crtc->event) {
343 x << 16, y << 16, 307 dev_err(dev->dev, "already pending flip!\n");
344 mode->hdisplay << 16, mode->vdisplay << 16); 308 return -EBUSY;
345 if (ret) {
346 drm_framebuffer_unreference(crtc->primary->fb);
347 return ret;
348 } 309 }
349 310
350 update_fb(crtc, crtc->primary->fb); 311 /* request a free CTL, if none is already allocated for this CRTC */
351 update_scanout(crtc, crtc->primary->fb); 312 if (state->enable && !mdp5_crtc->ctl) {
313 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
314 if (WARN_ON(!mdp5_crtc->ctl))
315 return -EINVAL;
316 }
317
318 /* verify that there are not too many planes attached to crtc
319 * and that we don't have conflicting mixer stages:
320 */
321 drm_atomic_crtc_state_for_each_plane(plane, state) {
322 struct drm_plane_state *pstate;
323
324 if (cnt >= ARRAY_SIZE(pstates)) {
325 dev_err(dev->dev, "too many planes!\n");
326 return -EINVAL;
327 }
328
329 pstate = state->state->plane_states[drm_plane_index(plane)];
330
331 /* plane might not have changed, in which case take
332 * current state:
333 */
334 if (!pstate)
335 pstate = plane->state;
336
337 pstates[cnt].plane = plane;
338 pstates[cnt].state = to_mdp5_plane_state(pstate);
339
340 cnt++;
341 }
342
343 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
344
345 for (i = 0; i < cnt; i++) {
346 pstates[i].state->stage = STAGE_BASE + i;
347 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
348 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
349 pstates[i].state->stage);
350 }
352 351
353 return 0; 352 return 0;
354} 353}
355 354
356static void mdp5_crtc_load_lut(struct drm_crtc *crtc) 355static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
357{ 356{
357 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
358 DBG("%s: begin", mdp5_crtc->name);
358} 359}
359 360
360static int mdp5_crtc_page_flip(struct drm_crtc *crtc, 361static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
361 struct drm_framebuffer *new_fb,
362 struct drm_pending_vblank_event *event,
363 uint32_t page_flip_flags)
364{ 362{
365 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 363 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
366 struct drm_device *dev = crtc->dev; 364 struct drm_device *dev = crtc->dev;
367 struct drm_gem_object *obj;
368 unsigned long flags; 365 unsigned long flags;
369 366
370 if (mdp5_crtc->event) { 367 DBG("%s: flush", mdp5_crtc->name);
371 dev_err(dev->dev, "already pending flip!\n");
372 return -EBUSY;
373 }
374 368
375 obj = msm_framebuffer_bo(new_fb, 0); 369 WARN_ON(mdp5_crtc->event);
376 370
377 spin_lock_irqsave(&dev->event_lock, flags); 371 spin_lock_irqsave(&dev->event_lock, flags);
378 mdp5_crtc->event = event; 372 mdp5_crtc->event = crtc->state->event;
379 spin_unlock_irqrestore(&dev->event_lock, flags); 373 spin_unlock_irqrestore(&dev->event_lock, flags);
380 374
381 update_fb(crtc, new_fb); 375 blend_setup(crtc);
376 crtc_flush_all(crtc);
377 request_pending(crtc, PENDING_FLIP);
382 378
383 return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); 379 if (mdp5_crtc->ctl && !crtc->state->enable) {
380 mdp5_ctl_release(mdp5_crtc->ctl);
381 mdp5_crtc->ctl = NULL;
382 }
384} 383}

385 384
386static int mdp5_crtc_set_property(struct drm_crtc *crtc, 385static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
391} 390}
392 391
393static const struct drm_crtc_funcs mdp5_crtc_funcs = { 392static const struct drm_crtc_funcs mdp5_crtc_funcs = {
394 .set_config = drm_crtc_helper_set_config, 393 .set_config = drm_atomic_helper_set_config,
395 .destroy = mdp5_crtc_destroy, 394 .destroy = mdp5_crtc_destroy,
396 .page_flip = mdp5_crtc_page_flip, 395 .page_flip = drm_atomic_helper_page_flip,
397 .set_property = mdp5_crtc_set_property, 396 .set_property = mdp5_crtc_set_property,
397 .reset = drm_atomic_helper_crtc_reset,
398 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
399 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
398}; 400};
399 401
400static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 402static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
401 .dpms = mdp5_crtc_dpms, 403 .dpms = mdp5_crtc_dpms,
402 .mode_fixup = mdp5_crtc_mode_fixup, 404 .mode_fixup = mdp5_crtc_mode_fixup,
403 .mode_set = mdp5_crtc_mode_set, 405 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
406 .mode_set = drm_helper_crtc_mode_set,
407 .mode_set_base = drm_helper_crtc_mode_set_base,
404 .prepare = mdp5_crtc_prepare, 408 .prepare = mdp5_crtc_prepare,
405 .commit = mdp5_crtc_commit, 409 .commit = mdp5_crtc_commit,
406 .mode_set_base = mdp5_crtc_mode_set_base,
407 .load_lut = mdp5_crtc_load_lut, 410 .load_lut = mdp5_crtc_load_lut,
411 .atomic_check = mdp5_crtc_atomic_check,
412 .atomic_begin = mdp5_crtc_atomic_begin,
413 .atomic_flush = mdp5_crtc_atomic_flush,
408}; 414};
409 415
410static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) 416static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
411{ 417{
412 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); 418 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
413 struct drm_crtc *crtc = &mdp5_crtc->base; 419 struct drm_crtc *crtc = &mdp5_crtc->base;
414 struct msm_drm_private *priv = crtc->dev->dev_private;
415 unsigned pending; 420 unsigned pending;
416 421
417 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); 422 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
420 425
421 if (pending & PENDING_FLIP) { 426 if (pending & PENDING_FLIP) {
422 complete_flip(crtc, NULL); 427 complete_flip(crtc, NULL);
423 drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
424 } 428 }
425} 429}
426 430
427static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) 431static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
428{ 432{
429 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); 433 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
430 struct drm_crtc *crtc = &mdp5_crtc->base; 434
431 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); 435 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
432 crtc_flush(crtc);
433} 436}
434 437
435uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) 438uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
450{ 453{
451 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 454 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
452 struct mdp5_kms *mdp5_kms = get_kms(crtc); 455 struct mdp5_kms *mdp5_kms = get_kms(crtc);
453 static const enum mdp5_intfnum intfnum[] = { 456 uint32_t flush_mask = 0;
454 INTF0, INTF1, INTF2, INTF3,
455 };
456 uint32_t intf_sel; 457 uint32_t intf_sel;
458 unsigned long flags;
457 459
458 /* now that we know what irq's we want: */ 460 /* now that we know what irq's we want: */
459 mdp5_crtc->err.irqmask = intf2err(intf); 461 mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
463 if (!mdp5_kms) 465 if (!mdp5_kms)
464 return; 466 return;
465 467
468 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
466 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); 469 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
467 470
468 switch (intf) { 471 switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
487 break; 490 break;
488 } 491 }
489 492
490 blend_setup(crtc); 493 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
494 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
491 495
492 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel); 496 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
497 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
498 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
499 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
493 500
494 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); 501 crtc_flush(crtc, flush_mask);
495 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
496 MDP5_CTL_OP_MODE(MODE_NONE) |
497 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
498
499 crtc_flush(crtc);
500} 502}
501 503
502static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id, 504int mdp5_crtc_get_lm(struct drm_crtc *crtc)
503 struct drm_plane *plane)
504{ 505{
505 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 506 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
506 507
507 BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes)); 508 if (WARN_ON(!crtc))
509 return -EINVAL;
508 510
509 if (mdp5_crtc->planes[pipe_id] == plane) 511 return mdp5_crtc->lm;
510 return;
511
512 mdp5_crtc->planes[pipe_id] = plane;
513 blend_setup(crtc);
514 if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
515 crtc_flush(crtc);
516}
517
518void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
519{
520 set_attach(crtc, mdp5_plane_pipe(plane), plane);
521}
522
523void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
524{
 525 /* don't actually detach our primary plane: */
526 if (to_mdp5_crtc(crtc)->plane == plane)
527 return;
528 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
529} 512}
530 513
531/* initialize crtc */ 514/* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
534{ 517{
535 struct drm_crtc *crtc = NULL; 518 struct drm_crtc *crtc = NULL;
536 struct mdp5_crtc *mdp5_crtc; 519 struct mdp5_crtc *mdp5_crtc;
537 int ret;
538 520
539 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); 521 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
540 if (!mdp5_crtc) { 522 if (!mdp5_crtc)
541 ret = -ENOMEM; 523 return ERR_PTR(-ENOMEM);
542 goto fail;
543 }
544 524
545 crtc = &mdp5_crtc->base; 525 crtc = &mdp5_crtc->base;
546 526
547 mdp5_crtc->plane = plane;
548 mdp5_crtc->id = id; 527 mdp5_crtc->id = id;
528 mdp5_crtc->lm = GET_LM_ID(id);
529
530 spin_lock_init(&mdp5_crtc->lm_lock);
549 531
550 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; 532 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
551 mdp5_crtc->err.irq = mdp5_crtc_err_irq; 533 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
553 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", 535 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
554 pipe2name(mdp5_plane_pipe(plane)), id); 536 pipe2name(mdp5_plane_pipe(plane)), id);
555 537
556 ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
557 "unref fb", unref_fb_worker);
558 if (ret)
559 goto fail;
560
561 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
562
563 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); 538 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
564 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 539 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
540 plane->crtc = crtc;
565 541
566 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base); 542 mdp5_plane_install_properties(plane, &crtc->base);
567 543
568 return crtc; 544 return crtc;
569
570fail:
571 if (crtc)
572 mdp5_crtc_destroy(crtc);
573
574 return ERR_PTR(ret);
575} 545}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..dea4505ac963
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15#include "mdp5_ctl.h"
16
17/*
18 * CTL - MDP Control Pool Manager
19 *
20 * Controls are shared between all CRTCs.
21 *
22 * They are intended to be used for data path configuration.
23 * The top level register programming describes the complete data path for
24 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
25 *
26 * Hardware capabilities determine the number of concurrent data paths
27 *
 28 * In certain use cases (high-resolution dual pipe), a single CTL can be
 29 * shared across multiple CRTCs.
 30 *
 31 * Because the number of CTLs can be less than the number of CRTCs,
 32 * CTLs are dynamically allocated from a pool, only when a CRTC
 33 * requests one (in mdp5_crtc_atomic_check()).
34 */
35
36struct mdp5_ctl {
37 struct mdp5_ctl_manager *ctlm;
38
39 u32 id;
40
41 /* whether this CTL has been allocated or not: */
42 bool busy;
43
44 /* memory output connection (@see mdp5_ctl_mode): */
45 u32 mode;
46
47 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
48 spinlock_t hw_lock;
49 u32 reg_offset;
50
51 /* flush mask used to commit CTL registers */
52 u32 flush_mask;
53
54 bool cursor_on;
55
56 struct drm_crtc *crtc;
57};
58
59struct mdp5_ctl_manager {
60 struct drm_device *dev;
61
62 /* number of CTL / Layer Mixers in this hw config: */
63 u32 nlm;
64 u32 nctl;
65
66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
67 spinlock_t pool_lock;
68 struct mdp5_ctl ctls[MAX_CTL];
69};
70
71static inline
72struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
73{
74 struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
75
76 return to_mdp5_kms(to_mdp_kms(priv->kms));
77}
78
79static inline
80void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
81{
82 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
83
84 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
85 mdp5_write(mdp5_kms, reg, data);
86}
87
88static inline
89u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
90{
91 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
92
93 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
94 return mdp5_read(mdp5_kms, reg);
95}
96
97
98int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
99{
100 unsigned long flags;
101 static const enum mdp5_intfnum intfnum[] = {
102 INTF0, INTF1, INTF2, INTF3,
103 };
104
105 spin_lock_irqsave(&ctl->hw_lock, flags);
106 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
107 MDP5_CTL_OP_MODE(ctl->mode) |
108 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
109 spin_unlock_irqrestore(&ctl->hw_lock, flags);
110
111 return 0;
112}
113
114int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
115{
116 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
117 unsigned long flags;
118 u32 blend_cfg;
119 int lm;
120
121 lm = mdp5_crtc_get_lm(ctl->crtc);
122 if (unlikely(WARN_ON(lm < 0))) {
123 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
124 ctl->id, lm);
125 return -EINVAL;
126 }
127
128 spin_lock_irqsave(&ctl->hw_lock, flags);
129
130 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
131
132 if (enable)
133 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
134 else
135 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
136
137 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
138
139 spin_unlock_irqrestore(&ctl->hw_lock, flags);
140
141 ctl->cursor_on = enable;
142
143 return 0;
144}
145
146
147int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
148{
149 unsigned long flags;
150
151 if (ctl->cursor_on)
152 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
153 else
154 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
155
156 spin_lock_irqsave(&ctl->hw_lock, flags);
157 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
158 spin_unlock_irqrestore(&ctl->hw_lock, flags);
159
160 return 0;
161}
162
163int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
164{
165 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
166 unsigned long flags;
167
168 if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
169 int lm = mdp5_crtc_get_lm(ctl->crtc);
170
171 if (unlikely(WARN_ON(lm < 0))) {
172 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
173 ctl->id, lm);
174 return -EINVAL;
175 }
176
177 /* for current targets, cursor bit is the same as LM bit */
178 flush_mask |= mdp_ctl_flush_mask_lm(lm);
179 }
180
181 spin_lock_irqsave(&ctl->hw_lock, flags);
182 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
183 spin_unlock_irqrestore(&ctl->hw_lock, flags);
184
185 return 0;
186}
187
188u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
189{
190 return ctl->flush_mask;
191}
192
193void mdp5_ctl_release(struct mdp5_ctl *ctl)
194{
195 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
196 unsigned long flags;
197
198 if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
199 dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
200 ctl->id, ctl->busy);
201 return;
202 }
203
204 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
205 ctl->busy = false;
206 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
207
208 DBG("CTL %d released", ctl->id);
209}
210
211/*
 212 * mdp5_ctlm_request() - CTL dynamic allocation
 213 *
 214 * Note: the current implementation assumes only one CRTC per CTL
 215 *
 216 * @return the first free CTL, or NULL if the pool is exhausted
217 */
218struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
219 struct drm_crtc *crtc)
220{
221 struct mdp5_ctl *ctl = NULL;
222 unsigned long flags;
223 int c;
224
225 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
226
227 for (c = 0; c < ctl_mgr->nctl; c++)
228 if (!ctl_mgr->ctls[c].busy)
229 break;
230
231 if (unlikely(c >= ctl_mgr->nctl)) {
232 dev_err(ctl_mgr->dev->dev, "No more CTL available!");
233 goto unlock;
234 }
235
236 ctl = &ctl_mgr->ctls[c];
237
238 ctl->crtc = crtc;
239 ctl->busy = true;
240 DBG("CTL %d allocated", ctl->id);
241
242unlock:
243 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
244 return ctl;
245}
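
Together with mdp5_ctl_release(), this gives each CRTC a plain acquire/release lifecycle. A sketch of the intended usage, mirroring the atomic_check/atomic_flush hooks earlier in this patch:

        /* enable path: grab a CTL once and keep it while active */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
                if (WARN_ON(!mdp5_crtc->ctl))
                        return -EINVAL;         /* pool exhausted */
        }

        /* disable path: hand the CTL back to the pool */
        if (mdp5_crtc->ctl && !crtc->state->enable) {
                mdp5_ctl_release(mdp5_crtc->ctl);
                mdp5_crtc->ctl = NULL;
        }
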
246
247void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
248{
249 unsigned long flags;
250 int c;
251
252 for (c = 0; c < ctl_mgr->nctl; c++) {
253 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
254
255 spin_lock_irqsave(&ctl->hw_lock, flags);
256 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
257 spin_unlock_irqrestore(&ctl->hw_lock, flags);
258 }
259}
260
261void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
262{
263 kfree(ctl_mgr);
264}
265
266struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
267 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
268{
269 struct mdp5_ctl_manager *ctl_mgr;
270 const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
271 unsigned long flags;
272 int c, ret;
273
274 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
275 if (!ctl_mgr) {
276 dev_err(dev->dev, "failed to allocate CTL manager\n");
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
282 dev_err(dev->dev, "Increase static pool size to at least %d\n",
283 ctl_cfg->count);
284 ret = -ENOSPC;
285 goto fail;
286 }
287
288 /* initialize the CTL manager: */
289 ctl_mgr->dev = dev;
290 ctl_mgr->nlm = hw_cfg->lm.count;
291 ctl_mgr->nctl = ctl_cfg->count;
292 spin_lock_init(&ctl_mgr->pool_lock);
293
294 /* initialize each CTL of the pool: */
295 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
296 for (c = 0; c < ctl_mgr->nctl; c++) {
297 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
298
299 if (WARN_ON(!ctl_cfg->base[c])) {
300 dev_err(dev->dev, "CTL_%d: base is null!\n", c);
301 ret = -EINVAL;
302 goto fail;
303 }
304 ctl->ctlm = ctl_mgr;
305 ctl->id = c;
306 ctl->mode = MODE_NONE;
307 ctl->reg_offset = ctl_cfg->base[c];
308 ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
309 ctl->busy = false;
310 spin_lock_init(&ctl->hw_lock);
311 }
312 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
313 DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
314
315 return ctl_mgr;
316
317fail:
318 if (ctl_mgr)
319 mdp5_ctlm_destroy(ctl_mgr);
320
321 return ERR_PTR(ret);
322}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..1018519b6af2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MDP5_CTL_H__
15#define __MDP5_CTL_H__
16
17#include "msm_drv.h"
18
19/*
20 * CTL Manager prototypes:
21 * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
22 * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
23 */
24struct mdp5_ctl_manager;
25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
26 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
29
30/*
31 * CTL prototypes:
 32 * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handler,
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
36
37int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
38
39int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
40
41/* @blend_cfg: see LM blender config definition below */
42int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
43
 44/* @flush_mask: see the CTL flush mask definitions below */
45int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
46u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
47
48void mdp5_ctl_release(struct mdp5_ctl *ctl);
49
50/*
51 * blend_cfg (LM blender config):
52 *
 53 * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
 54 * are blended according to their stage (z-order), through the @blend_cfg argument.
55 */
56static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
57 enum mdp_mixer_stage_id stage)
58{
59 switch (pipe) {
60 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
61 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
62 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
63 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
64 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
65 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
66 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
67 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
68 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
69 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
70 default: return 0;
71 }
72}
73
74/*
75 * flush_mask (CTL flush masks):
76 *
 77 * The following functions allow each DRM entity to get and store
 78 * its own flush mask.
 79 * Once stored, these masks are retrieved through each entity's
 80 * interface and OR-ed together by the caller of mdp5_ctl_commit(),
 81 * which names the block(s) to flush via the @flush_mask parameter.
82 */
83
84#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
85
86static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
87{
88 /* TODO: use id once multiple cursor support is present */
89 (void)cursor_id;
90
91 return MDP5_CTL_FLUSH_CURSOR_DUMMY;
92}
93
94static inline u32 mdp_ctl_flush_mask_lm(int lm)
95{
96 switch (lm) {
97 case 0: return MDP5_CTL_FLUSH_LM0;
98 case 1: return MDP5_CTL_FLUSH_LM1;
99 case 2: return MDP5_CTL_FLUSH_LM2;
100 case 5: return MDP5_CTL_FLUSH_LM5;
101 default: return 0;
102 }
103}
104
105static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
106{
107 switch (pipe) {
108 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
109 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
110 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
111 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
112 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
113 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
114 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
115 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
116 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
117 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
118 default: return 0;
119 }
120}
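
These helpers are meant to be OR-ed together by whoever calls mdp5_ctl_commit(). A sketch of a full flush for one RGB pipe feeding layer mixer 0 (on current targets the cursor dummy bit is translated into the LM bit inside mdp5_ctl_commit()):

        u32 flush_mask = 0;

        flush_mask |= mdp_ctl_flush_mask_pipe(SSPP_RGB0);  /* pipe regs */
        flush_mask |= mdp_ctl_flush_mask_lm(0);            /* layer mixer 0 */
        flush_mask |= mdp5_ctl_get_flush(ctl);             /* the CTL itself */

        mdp5_ctl_commit(ctl, flush_mask);                  /* latches at vsync */
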
121
122#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..0254bfdeb92f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
24 struct drm_encoder base; 24 struct drm_encoder base;
25 int intf; 25 int intf;
26 enum mdp5_intf intf_id; 26 enum mdp5_intf intf_id;
27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
27 bool enabled; 28 bool enabled;
28 uint32_t bsc; 29 uint32_t bsc;
29}; 30};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
115 struct mdp5_kms *mdp5_kms = get_kms(encoder); 116 struct mdp5_kms *mdp5_kms = get_kms(encoder);
116 int intf = mdp5_encoder->intf; 117 int intf = mdp5_encoder->intf;
117 bool enabled = (mode == DRM_MODE_DPMS_ON); 118 bool enabled = (mode == DRM_MODE_DPMS_ON);
119 unsigned long flags;
118 120
119 DBG("mode=%d", mode); 121 DBG("mode=%d", mode);
120 122
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
123 125
124 if (enabled) { 126 if (enabled) {
125 bs_set(mdp5_encoder, 1); 127 bs_set(mdp5_encoder, 1);
128 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
126 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 129 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
130 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
127 } else { 131 } else {
132 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
128 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); 133 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
134 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
135
136 /*
 137 * Wait for a vsync so we know ENABLE=0 has latched before
 138 * the (connector) source of the vsync gets disabled;
 139 * otherwise we end up in a bad state if we re-enable
 140 * before the disable latches, and some of the setting
 141 * changes for the new modeset (like the new scanout
 142 * buffer) don't latch properly.
143 */
144 mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
145
129 bs_set(mdp5_encoder, 0); 146 bs_set(mdp5_encoder, 0);
130 } 147 }
131 148
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
150 uint32_t display_v_start, display_v_end; 167 uint32_t display_v_start, display_v_end;
151 uint32_t hsync_start_x, hsync_end_x; 168 uint32_t hsync_start_x, hsync_end_x;
152 uint32_t format; 169 uint32_t format;
170 unsigned long flags;
153 171
154 mode = adjusted_mode; 172 mode = adjusted_mode;
155 173
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
180 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; 198 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
181 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; 199 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
182 200
201 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
202
183 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), 203 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
184 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | 204 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
185 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); 205 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
201 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); 221 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
202 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); 222 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
203 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ 223 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
224
225 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
204} 226}
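
The new intf_lock is taken once around the whole burst of INTF_* writes rather than per register, so a concurrent dpms toggle can never observe half-programmed timing. The idiom, reduced to its shape (hsync_ctl and format stand in for the computed values):

        unsigned long flags;

        spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
        /* every INTF register for the new mode goes in one critical
         * section, so readers never see a partially updated config: */
        mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), hsync_ctl);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
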
205 227
206static void mdp5_encoder_prepare(struct drm_encoder *encoder) 228static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
242 mdp5_encoder->intf_id = intf_id; 264 mdp5_encoder->intf_id = intf_id;
243 encoder = &mdp5_encoder->base; 265 encoder = &mdp5_encoder->base;
244 266
267 spin_lock_init(&mdp5_encoder->intf_lock);
268
245 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, 269 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS); 270 DRM_MODE_ENCODER_TMDS);
247 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 271 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index f2b985bc2adf..70ac81edd40f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h>
18 20
19#include "msm_drv.h" 21#include "msm_drv.h"
20#include "mdp5_kms.h" 22#include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
88 90
89 VERB("intr=%08x", intr); 91 VERB("intr=%08x", intr);
90 92
91 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) 93 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
92 mdp5_irq_mdp(mdp_kms); 94 mdp5_irq_mdp(mdp_kms);
95 intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
96 }
93 97
94 if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) 98 while (intr) {
95 hdmi_irq(0, mdp5_kms->hdmi); 99 irq_hw_number_t hwirq = fls(intr) - 1;
100 generic_handle_irq(irq_find_mapping(
101 mdp5_kms->irqcontroller.domain, hwirq));
102 intr &= ~(1 << hwirq);
103 }
96 104
97 return IRQ_HANDLED; 105 return IRQ_HANDLED;
98} 106}
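
The new dispatch loop walks the status word highest bit first: fls() returns the 1-based index of the top set bit, the mapped Linux irq for that hw bit is handled, and the bit is cleared before the next pass. The same idiom in isolation (intr and domain stand in for the driver's values):

        while (intr) {
                irq_hw_number_t hwirq = fls(intr) - 1;  /* 0-based bit index */

                generic_handle_irq(irq_find_mapping(domain, hwirq));
                intr &= ~(1 << hwirq);                  /* mark handled */
        }
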
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
109 mdp_update_vblank_mask(to_mdp_kms(kms), 117 mdp_update_vblank_mask(to_mdp_kms(kms),
110 mdp5_crtc_vblank(crtc), false); 118 mdp5_crtc_vblank(crtc), false);
111} 119}
120
121/*
122 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
 123 * can register to get their irqs delivered
124 */
125
126#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
127 MDP5_HW_INTR_STATUS_INTR_DSI1 | \
128 MDP5_HW_INTR_STATUS_INTR_HDMI | \
129 MDP5_HW_INTR_STATUS_INTR_EDP)
130
131static void mdp5_hw_mask_irq(struct irq_data *irqd)
132{
133 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
134 smp_mb__before_atomic();
135 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
136 smp_mb__after_atomic();
137}
138
139static void mdp5_hw_unmask_irq(struct irq_data *irqd)
140{
141 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
142 smp_mb__before_atomic();
143 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
144 smp_mb__after_atomic();
145}
146
147static struct irq_chip mdp5_hw_irq_chip = {
148 .name = "mdp5",
149 .irq_mask = mdp5_hw_mask_irq,
150 .irq_unmask = mdp5_hw_unmask_irq,
151};
152
153static int mdp5_hw_irqdomain_map(struct irq_domain *d,
154 unsigned int irq, irq_hw_number_t hwirq)
155{
156 struct mdp5_kms *mdp5_kms = d->host_data;
157
158 if (!(VALID_IRQS & (1 << hwirq)))
159 return -EPERM;
160
161 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
162 irq_set_chip_data(irq, mdp5_kms);
163 set_irq_flags(irq, IRQF_VALID);
164
165 return 0;
166}
167
168static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
169 .map = mdp5_hw_irqdomain_map,
170 .xlate = irq_domain_xlate_onecell,
171};
172
173
174int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
175{
176 struct device *dev = mdp5_kms->dev->dev;
177 struct irq_domain *d;
178
179 d = irq_domain_add_linear(dev->of_node, 32,
180 &mdp5_hw_irqdomain_ops, mdp5_kms);
181 if (!d) {
182 dev_err(dev, "mdp5 irq domain add failed\n");
183 return -ENXIO;
184 }
185
186 mdp5_kms->irqcontroller.enabled_mask = 0;
187 mdp5_kms->irqcontroller.domain = d;
188
189 return 0;
190}
191
192void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
193{
194 if (mdp5_kms->irqcontroller.domain) {
195 irq_domain_remove(mdp5_kms->irqcontroller.domain);
196 mdp5_kms->irqcontroller.domain = NULL;
197 }
198}
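
With the linear domain registered, a sub-block never reads MDP5_HW_INTR_STATUS itself: it maps its hw bit to a Linux irq and uses the regular request_irq() path. A hypothetical consumer sketch (the handler name and devm usage are illustrative, not part of this patch):

        /* hw bit -> hwirq number, e.g. for the HDMI status bit: */
        irq_hw_number_t hwirq = ffs(MDP5_HW_INTR_STATUS_INTR_HDMI) - 1;
        unsigned int virq;

        virq = irq_create_mapping(mdp5_kms->irqcontroller.domain, hwirq);
        if (!virq)
                return -EINVAL;

        /* hdmi_irq_handler is hypothetical; any normal handler works */
        ret = devm_request_irq(dev, virq, hdmi_irq_handler, 0,
                               "mdp5-hdmi", priv->hdmi);
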
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 31a2c6331a1d..a11f1b80c488 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
24 "mdp_0", 25 "mdp_0",
25}; 26};
26 27
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28
29const struct mdp5_config *mdp5_cfg;
30
31static const struct mdp5_config msm8x74_config = {
32 .name = "msm8x74",
33 .ctl = {
34 .count = 5,
35 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
36 },
37 .pipe_vig = {
38 .count = 3,
39 .base = { 0x01200, 0x01600, 0x01a00 },
40 },
41 .pipe_rgb = {
42 .count = 3,
43 .base = { 0x01e00, 0x02200, 0x02600 },
44 },
45 .pipe_dma = {
46 .count = 2,
47 .base = { 0x02a00, 0x02e00 },
48 },
49 .lm = {
50 .count = 5,
51 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
52 },
53 .dspp = {
54 .count = 3,
55 .base = { 0x04600, 0x04a00, 0x04e00 },
56 },
57 .ad = {
58 .count = 2,
59 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
60 },
61 .intf = {
62 .count = 4,
63 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
64 },
65};
66
67static const struct mdp5_config apq8084_config = {
68 .name = "apq8084",
69 .ctl = {
70 .count = 5,
71 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
72 },
73 .pipe_vig = {
74 .count = 4,
75 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
76 },
77 .pipe_rgb = {
78 .count = 4,
79 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
80 },
81 .pipe_dma = {
82 .count = 2,
83 .base = { 0x03200, 0x03600 },
84 },
85 .lm = {
86 .count = 6,
87 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
88 },
89 .dspp = {
90 .count = 4,
91 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
92
93 },
94 .ad = {
95 .count = 3,
96 .base = { 0x13500, 0x13700, 0x13900 },
97 },
98 .intf = {
99 .count = 5,
100 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
101 },
102};
103
104struct mdp5_config_entry {
105 int revision;
106 const struct mdp5_config *config;
107};
108
109static const struct mdp5_config_entry mdp5_configs[] = {
110 { .revision = 0, .config = &msm8x74_config },
111 { .revision = 2, .config = &msm8x74_config },
112 { .revision = 3, .config = &apq8084_config },
113};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
116{
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
120 int i, ret = 0;
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
161static int mdp5_hw_init(struct msm_kms *kms) 28static int mdp5_hw_init(struct msm_kms *kms)
162{ 29{
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev; 31 struct drm_device *dev = mdp5_kms->dev;
165 int i; 32 unsigned long flags;
166 33
167 pm_runtime_get_sync(dev->dev); 34 pm_runtime_get_sync(dev->dev);
168 35
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
190 * care. 57 * care.
191 */ 58 */
192 59
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 61 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
194 63
195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) 64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197 65
198 pm_runtime_put_sync(dev->dev); 66 pm_runtime_put_sync(dev->dev);
199 67
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 89 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
222 struct msm_mmu *mmu = mdp5_kms->mmu; 90 struct msm_mmu *mmu = mdp5_kms->mmu;
223 91
92 mdp5_irq_domain_fini(mdp5_kms);
93
224 if (mmu) { 94 if (mmu) {
225 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 95 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
226 mmu->funcs->destroy(mmu); 96 mmu->funcs->destroy(mmu);
227 } 97 }
98
99 if (mdp5_kms->ctlm)
100 mdp5_ctlm_destroy(mdp5_kms->ctlm);
101 if (mdp5_kms->smp)
102 mdp5_smp_destroy(mdp5_kms->smp);
103 if (mdp5_kms->cfg)
104 mdp5_cfg_destroy(mdp5_kms->cfg);
105
228 kfree(mdp5_kms); 106 kfree(mdp5_kms);
229} 107}
230 108
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
274 static const enum mdp5_pipe crtcs[] = { 152 static const enum mdp5_pipe crtcs[] = {
275 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 153 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
276 }; 154 };
155 static const enum mdp5_pipe pub_planes[] = {
156 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
157 };
277 struct drm_device *dev = mdp5_kms->dev; 158 struct drm_device *dev = mdp5_kms->dev;
278 struct msm_drm_private *priv = dev->dev_private; 159 struct msm_drm_private *priv = dev->dev_private;
279 struct drm_encoder *encoder; 160 struct drm_encoder *encoder;
161 const struct mdp5_cfg_hw *hw_cfg;
280 int i, ret; 162 int i, ret;
281 163
282 /* construct CRTCs: */ 164 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
283 for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) { 165
166 /* register our interrupt-controller for hdmi/eDP/dsi/etc
167 * to use for irqs routed through mdp:
168 */
169 ret = mdp5_irq_domain_init(mdp5_kms);
170 if (ret)
171 goto fail;
172
173 /* construct CRTCs and their private planes: */
174 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
284 struct drm_plane *plane; 175 struct drm_plane *plane;
285 struct drm_crtc *crtc; 176 struct drm_crtc *crtc;
286 177
287 plane = mdp5_plane_init(dev, crtcs[i], true); 178 plane = mdp5_plane_init(dev, crtcs[i], true,
179 hw_cfg->pipe_rgb.base[i]);
288 if (IS_ERR(plane)) { 180 if (IS_ERR(plane)) {
289 ret = PTR_ERR(plane); 181 ret = PTR_ERR(plane);
290 dev_err(dev->dev, "failed to construct plane for %s (%d)\n", 182 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
302 priv->crtcs[priv->num_crtcs++] = crtc; 194 priv->crtcs[priv->num_crtcs++] = crtc;
303 } 195 }
304 196
197 /* Construct public planes: */
198 for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
199 struct drm_plane *plane;
200
201 plane = mdp5_plane_init(dev, pub_planes[i], false,
202 hw_cfg->pipe_vig.base[i]);
203 if (IS_ERR(plane)) {
204 ret = PTR_ERR(plane);
205 dev_err(dev->dev, "failed to construct %s plane: %d\n",
206 pipe2name(pub_planes[i]), ret);
207 goto fail;
208 }
209 }
210
305 /* Construct encoder for HDMI: */ 211 /* Construct encoder for HDMI: */
306 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); 212 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
307 if (IS_ERR(encoder)) { 213 if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
324 priv->encoders[priv->num_encoders++] = encoder; 230 priv->encoders[priv->num_encoders++] = encoder;
325 231
326 /* Construct bridge/connector for HDMI: */ 232 /* Construct bridge/connector for HDMI: */
327 mdp5_kms->hdmi = hdmi_init(dev, encoder); 233 if (priv->hdmi) {
328 if (IS_ERR(mdp5_kms->hdmi)) { 234 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
329 ret = PTR_ERR(mdp5_kms->hdmi); 235 if (ret) {
330 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); 236 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
331 goto fail; 237 goto fail;
238 }
332 } 239 }
333 240
334 return 0; 241 return 0;
@@ -337,6 +244,21 @@ fail:
337 return ret; 244 return ret;
338} 245}
339 246
247static void read_hw_revision(struct mdp5_kms *mdp5_kms,
248 uint32_t *major, uint32_t *minor)
249{
250 uint32_t version;
251
252 mdp5_enable(mdp5_kms);
253 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
254 mdp5_disable(mdp5_kms);
255
256 *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
257 *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
258
259 DBG("MDP5 version v%d.%d", *major, *minor);
260}
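
read_hw_revision() is the reason kms init below sets a conservative core clock first: the VERSION register can only be read with clocks running, but the optimal rate lives in the per-revision config that the version selects. The probe ordering, condensed from the hunk that follows:

        clk_set_rate(mdp5_kms->src_clk, 200000000);  /* safe bring-up rate */
        read_hw_revision(mdp5_kms, &major, &minor);  /* needs clocks on */

        mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
        config = mdp5_cfg_get_config(mdp5_kms->cfg);

        /* revision known: run at the configured maximum */
        clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
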
261
340static int get_clk(struct platform_device *pdev, struct clk **clkp, 262static int get_clk(struct platform_device *pdev, struct clk **clkp,
341 const char *name) 263 const char *name)
342{ 264{
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
353struct msm_kms *mdp5_kms_init(struct drm_device *dev) 275struct msm_kms *mdp5_kms_init(struct drm_device *dev)
354{ 276{
355 struct platform_device *pdev = dev->platformdev; 277 struct platform_device *pdev = dev->platformdev;
356 struct mdp5_platform_config *config = mdp5_get_config(pdev); 278 struct mdp5_cfg *config;
357 struct mdp5_kms *mdp5_kms; 279 struct mdp5_kms *mdp5_kms;
358 struct msm_kms *kms = NULL; 280 struct msm_kms *kms = NULL;
359 struct msm_mmu *mmu; 281 struct msm_mmu *mmu;
282 uint32_t major, minor;
360 int i, ret; 283 int i, ret;
361 284
362 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 285 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
366 goto fail; 289 goto fail;
367 } 290 }
368 291
292 spin_lock_init(&mdp5_kms->resource_lock);
293
369 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 294 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
370 295
371 kms = &mdp5_kms->base.base; 296 kms = &mdp5_kms->base.base;
372 297
373 mdp5_kms->dev = dev; 298 mdp5_kms->dev = dev;
374 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
375 299
376 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 300 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
377 if (IS_ERR(mdp5_kms->mmio)) { 301 if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
416 if (ret) 340 if (ret)
417 goto fail; 341 goto fail;
418 342
419 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); 343 /* we need to set a default rate before enabling. Set a safe
 344 * rate first, then figure out the hw revision, and then set a
345 * more optimal rate:
346 */
347 clk_set_rate(mdp5_kms->src_clk, 200000000);
348
349 read_hw_revision(mdp5_kms, &major, &minor);
420 350
421 ret = mdp5_select_hw_cfg(kms); 351 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
422 if (ret) 352 if (IS_ERR(mdp5_kms->cfg)) {
353 ret = PTR_ERR(mdp5_kms->cfg);
354 mdp5_kms->cfg = NULL;
423 goto fail; 355 goto fail;
356 }
357
358 config = mdp5_cfg_get_config(mdp5_kms->cfg);
359
360 /* TODO: compute core clock rate at runtime */
361 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
362
363 mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
364 if (IS_ERR(mdp5_kms->smp)) {
365 ret = PTR_ERR(mdp5_kms->smp);
366 mdp5_kms->smp = NULL;
367 goto fail;
368 }
369
370 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
371 if (IS_ERR(mdp5_kms->ctlm)) {
372 ret = PTR_ERR(mdp5_kms->ctlm);
373 mdp5_kms->ctlm = NULL;
374 goto fail;
375 }
424 376
425 /* make sure things are off before attaching iommu (bootloader could 377 /* make sure things are off before attaching iommu (bootloader could
426 * have left things on, in which case we'll start getting faults if 378 * have left things on, in which case we'll start getting faults if
427 * we don't disable): 379 * we don't disable):
428 */ 380 */
429 mdp5_enable(mdp5_kms); 381 mdp5_enable(mdp5_kms);
430 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++) 382 for (i = 0; i < config->hw->intf.count; i++)
431 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 383 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
432 mdp5_disable(mdp5_kms); 384 mdp5_disable(mdp5_kms);
433 mdelay(16); 385 mdelay(16);
434 386
435 if (config->iommu) { 387 if (config->platform.iommu) {
436 mmu = msm_iommu_new(&pdev->dev, config->iommu); 388 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
437 if (IS_ERR(mmu)) { 389 if (IS_ERR(mmu)) {
438 ret = PTR_ERR(mmu); 390 ret = PTR_ERR(mmu);
439 dev_err(dev->dev, "failed to init iommu: %d\n", ret); 391 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
474 mdp5_destroy(kms); 426 mdp5_destroy(kms);
475 return ERR_PTR(ret); 427 return ERR_PTR(ret);
476} 428}
477
478static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
479{
480 static struct mdp5_platform_config config = {};
481#ifdef CONFIG_OF
482 /* TODO */
483#endif
484 config.iommu = iommu_domain_alloc(&platform_bus_type);
485 /* TODO hard-coded in downstream mdss, but should it be? */
486 config.max_clk = 200000000;
487 /* TODO get from DT: */
488 config.smp_blk_cnt = 22;
489
490 return &config;
491}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 5bf340dd0f00..dd69c77c0d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */ 24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
25#define MDP5_MAX_BASES 8
26struct mdp5_sub_block {
27 int count;
28 uint32_t base[MDP5_MAX_BASES];
29};
30struct mdp5_config {
31 char *name;
32 struct mdp5_sub_block ctl;
33 struct mdp5_sub_block pipe_vig;
34 struct mdp5_sub_block pipe_rgb;
35 struct mdp5_sub_block pipe_dma;
36 struct mdp5_sub_block lm;
37 struct mdp5_sub_block dspp;
38 struct mdp5_sub_block ad;
39 struct mdp5_sub_block intf;
40};
41extern const struct mdp5_config *mdp5_cfg;
42#include "mdp5.xml.h" 25#include "mdp5.xml.h"
26#include "mdp5_ctl.h"
43#include "mdp5_smp.h" 27#include "mdp5_smp.h"
44 28
45struct mdp5_kms { 29struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
47 31
48 struct drm_device *dev; 32 struct drm_device *dev;
49 33
50 int rev; 34 struct mdp5_cfg_handler *cfg;
51 const struct mdp5_config *hw_cfg;
52 35
53 /* mapper-id used to request GEM buffer mapped for scanout: */ 36 /* mapper-id used to request GEM buffer mapped for scanout: */
54 int id; 37 int id;
55 struct msm_mmu *mmu; 38 struct msm_mmu *mmu;
56 39
57 /* for tracking smp allocation amongst pipes: */ 40 struct mdp5_smp *smp;
58 mdp5_smp_state_t smp_state; 41 struct mdp5_ctl_manager *ctlm;
59 struct mdp5_client_smp_state smp_client_state[CID_MAX];
60 int smp_blk_cnt;
61 42
62 /* io/register spaces: */ 43 /* io/register spaces: */
63 void __iomem *mmio, *vbif; 44 void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
71 struct clk *lut_clk; 52 struct clk *lut_clk;
72 struct clk *vsync_clk; 53 struct clk *vsync_clk;
73 54
74 struct hdmi *hdmi; 55 /*
 56 * lock to protect access to global resources, i.e. the following register:
57 * - REG_MDP5_DISP_INTF_SEL
58 */
59 spinlock_t resource_lock;
75 60
76 struct mdp_irq error_handler; 61 struct mdp_irq error_handler;
62
63 struct {
64 volatile unsigned long enabled_mask;
65 struct irq_domain *domain;
66 } irqcontroller;
77}; 67};
78#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) 68#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
79 69
80/* platform config data (ie. from DT, or pdata) */ 70struct mdp5_plane_state {
81struct mdp5_platform_config { 71 struct drm_plane_state base;
82 struct iommu_domain *iommu; 72
83 uint32_t max_clk; 73 /* "virtual" zpos.. we calculate actual mixer-stage at runtime
84 int smp_blk_cnt; 74 * by sorting the attached planes by zpos and then assigning
75 * mixer stage lowest to highest. Private planes get default
76 * zpos of zero, and public planes a unique value that is
77 * greater than zero. This way, things work out if a naive
78 * userspace assigns planes to a crtc without setting zpos.
79 */
80 int zpos;
81
82 /* the actual mixer stage, calculated in crtc->atomic_check()
83 * NOTE: this should move to mdp5_crtc_state, when that exists
84 */
85 enum mdp_mixer_stage_id stage;
86
87 /* some additional transactional status to help us know in the
88 * apply path whether we need to update SMP allocation, and
 89 * whether the current update is still pending:
90 */
91 bool mode_changed : 1;
92 bool pending : 1;
85}; 93};
94#define to_mdp5_plane_state(x) \
95 container_of(x, struct mdp5_plane_state, base)
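
mdp5_plane_state embeds drm_plane_state as its first member, so a plain container_of() recovers the driver state from any base pointer the DRM core hands back. A small sketch of the accessor in use:

static void example(struct drm_plane *plane)
{
        /* the core tracks only base state; driver fields ride along */
        struct mdp5_plane_state *pstate =
                        to_mdp5_plane_state(plane->state);

        DBG("zpos=%d stage=%d pending=%d", pstate->zpos,
                        pstate->stage, pstate->pending);
}
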
86 96
87static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) 97static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
88{ 98{
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
107 return names[pipe]; 117 return names[pipe];
108} 118}
109 119
110static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
111{
112 switch (pipe) {
113 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
114 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
115 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
116 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
117 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
118 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
119 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
120 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
121 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
122 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
123 default: return 0;
124 }
125}
126
127static inline int pipe2nclients(enum mdp5_pipe pipe) 120static inline int pipe2nclients(enum mdp5_pipe pipe)
128{ 121{
129 switch (pipe) { 122 switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
137 } 130 }
138} 131}
139 132
140static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
141{
142 WARN_ON(plane >= pipe2nclients(pipe));
143 switch (pipe) {
144 case SSPP_VIG0: return CID_VIG0_Y + plane;
145 case SSPP_VIG1: return CID_VIG1_Y + plane;
146 case SSPP_VIG2: return CID_VIG2_Y + plane;
147 case SSPP_RGB0: return CID_RGB0;
148 case SSPP_RGB1: return CID_RGB1;
149 case SSPP_RGB2: return CID_RGB2;
150 case SSPP_DMA0: return CID_DMA0_Y + plane;
151 case SSPP_DMA1: return CID_DMA1_Y + plane;
152 case SSPP_VIG3: return CID_VIG3_Y + plane;
153 case SSPP_RGB3: return CID_RGB3;
154 default: return CID_UNUSED;
155 }
156}
157
158static inline uint32_t mixer2flush(int lm)
159{
160 switch (lm) {
161 case 0: return MDP5_CTL_FLUSH_LM0;
162 case 1: return MDP5_CTL_FLUSH_LM1;
163 case 2: return MDP5_CTL_FLUSH_LM2;
164 default: return 0;
165 }
166}
167
168static inline uint32_t intf2err(int intf) 133static inline uint32_t intf2err(int intf)
169{ 134{
170 switch (intf) { 135 switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
197irqreturn_t mdp5_irq(struct msm_kms *kms); 162irqreturn_t mdp5_irq(struct msm_kms *kms);
198int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 163int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
199void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 164void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
165int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
166void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
200 167
201static inline 168static inline
202uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, 169uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
210 177
211void mdp5_plane_install_properties(struct drm_plane *plane, 178void mdp5_plane_install_properties(struct drm_plane *plane,
212 struct drm_mode_object *obj); 179 struct drm_mode_object *obj);
213void mdp5_plane_set_scanout(struct drm_plane *plane,
214 struct drm_framebuffer *fb);
215int mdp5_plane_mode_set(struct drm_plane *plane,
216 struct drm_crtc *crtc, struct drm_framebuffer *fb,
217 int crtc_x, int crtc_y,
218 unsigned int crtc_w, unsigned int crtc_h,
219 uint32_t src_x, uint32_t src_y,
220 uint32_t src_w, uint32_t src_h);
180uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
221void mdp5_plane_complete_flip(struct drm_plane *plane); 181void mdp5_plane_complete_flip(struct drm_plane *plane);
222enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 182enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
223struct drm_plane *mdp5_plane_init(struct drm_device *dev, 183struct drm_plane *mdp5_plane_init(struct drm_device *dev,
224 enum mdp5_pipe pipe, bool private_plane);
184 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
225 185
226uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 186uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
227 187
188int mdp5_crtc_get_lm(struct drm_crtc *crtc);
228void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 189void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
229void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, 190void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
230 enum mdp5_intf intf_id); 191 enum mdp5_intf intf_id);
231void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
232void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
233struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 192struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
234 struct drm_plane *plane, int id); 193 struct drm_plane *plane, int id);
235 194
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412ad..26e5fdea6594 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -17,6 +18,7 @@
17 18
18#include "mdp5_kms.h" 19#include "mdp5_kms.h"
19 20
21#define MAX_PLANE 4
20 22
21struct mdp5_plane { 23struct mdp5_plane {
22 struct drm_plane base; 24 struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
24 26
25 enum mdp5_pipe pipe; 27 enum mdp5_pipe pipe;
26 28
29 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
30 uint32_t reg_offset;
31
32 uint32_t flush_mask; /* used to commit pipe registers */
33
27 uint32_t nformats; 34 uint32_t nformats;
28 uint32_t formats[32]; 35 uint32_t formats[32];
29 36
@@ -31,31 +38,24 @@ struct mdp5_plane {
31}; 38};
32#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 39#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
33 40
41static int mdp5_plane_mode_set(struct drm_plane *plane,
42 struct drm_crtc *crtc, struct drm_framebuffer *fb,
43 int crtc_x, int crtc_y,
44 unsigned int crtc_w, unsigned int crtc_h,
45 uint32_t src_x, uint32_t src_y,
46 uint32_t src_w, uint32_t src_h);
47static void set_scanout_locked(struct drm_plane *plane,
48 struct drm_framebuffer *fb);
49
34static struct mdp5_kms *get_kms(struct drm_plane *plane) 50static struct mdp5_kms *get_kms(struct drm_plane *plane)
35{ 51{
36 struct msm_drm_private *priv = plane->dev->dev_private; 52 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp5_kms(to_mdp_kms(priv->kms)); 53 return to_mdp5_kms(to_mdp_kms(priv->kms));
38} 54}
39 55
40static int mdp5_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
56static bool plane_enabled(struct drm_plane_state *state)
46{ 57{
47 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
48
49 mdp5_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp5_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
58 return state->fb && state->crtc;
59} 59}
60 60
61static int mdp5_plane_disable(struct drm_plane *plane) 61static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
64 struct mdp5_kms *mdp5_kms = get_kms(plane); 64 struct mdp5_kms *mdp5_kms = get_kms(plane);
65 enum mdp5_pipe pipe = mdp5_plane->pipe; 65 enum mdp5_pipe pipe = mdp5_plane->pipe;
66 int i;
67 66
68 DBG("%s: disable", mdp5_plane->name); 67 DBG("%s: disable", mdp5_plane->name);
69 68
70 /* update our SMP request to zero (release all our blks): */
71 for (i = 0; i < pipe2nclients(pipe); i++)
72 mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
73
69 if (mdp5_kms) {
70 /* Release the memory we requested earlier from the SMP: */
71 mdp5_smp_release(mdp5_kms->smp, pipe);
72 }
74 /* TODO detaching now will cause us not to get the last
75 * vblank and mdp5_smp_commit().. so other planes will
76 * still see smp blocks previously allocated to us as
77 * in-use..
78 */
79 if (plane->crtc)
80 mdp5_crtc_detach(plane->crtc, plane);
81 73
82 return 0; 74 return 0;
83} 75}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
85static void mdp5_plane_destroy(struct drm_plane *plane) 77static void mdp5_plane_destroy(struct drm_plane *plane)
86{ 78{
87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 79 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
88 struct msm_drm_private *priv = plane->dev->dev_private;
89
90 if (priv->kms)
91 mdp5_plane_disable(plane);
92 80
81 drm_plane_helper_disable(plane);
93 drm_plane_cleanup(plane); 82 drm_plane_cleanup(plane);
94 83
95 kfree(mdp5_plane); 84 kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
109 return -EINVAL; 98 return -EINVAL;
110} 99}
111 100
101static void mdp5_plane_reset(struct drm_plane *plane)
102{
103 struct mdp5_plane_state *mdp5_state;
104
105 if (plane->state && plane->state->fb)
106 drm_framebuffer_unreference(plane->state->fb);
107
108 kfree(to_mdp5_plane_state(plane->state));
109 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
110
111 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
112 mdp5_state->zpos = 0;
113 } else {
114 mdp5_state->zpos = 1 + drm_plane_index(plane);
115 }
116
117 plane->state = &mdp5_state->base;
118}
119
120static struct drm_plane_state *
121mdp5_plane_duplicate_state(struct drm_plane *plane)
122{
123 struct mdp5_plane_state *mdp5_state;
124
125 if (WARN_ON(!plane->state))
126 return NULL;
127
128 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
129 sizeof(*mdp5_state), GFP_KERNEL);
130
131 if (mdp5_state && mdp5_state->base.fb)
132 drm_framebuffer_reference(mdp5_state->base.fb);
133
134 mdp5_state->mode_changed = false;
135 mdp5_state->pending = false;
136
137 return &mdp5_state->base;
138}
139
140static void mdp5_plane_destroy_state(struct drm_plane *plane,
141 struct drm_plane_state *state)
142{
143 if (state->fb)
144 drm_framebuffer_unreference(state->fb);
145
146 kfree(to_mdp5_plane_state(state));
147}
148
112static const struct drm_plane_funcs mdp5_plane_funcs = { 149static const struct drm_plane_funcs mdp5_plane_funcs = {
113 .update_plane = mdp5_plane_update,
114 .disable_plane = mdp5_plane_disable,
150 .update_plane = drm_atomic_helper_update_plane,
151 .disable_plane = drm_atomic_helper_disable_plane,
115 .destroy = mdp5_plane_destroy, 152 .destroy = mdp5_plane_destroy,
116 .set_property = mdp5_plane_set_property, 153 .set_property = mdp5_plane_set_property,
154 .reset = mdp5_plane_reset,
155 .atomic_duplicate_state = mdp5_plane_duplicate_state,
156 .atomic_destroy_state = mdp5_plane_destroy_state,
117}; 157};
118 158
119void mdp5_plane_set_scanout(struct drm_plane *plane,
159static int mdp5_plane_prepare_fb(struct drm_plane *plane,
120 struct drm_framebuffer *fb) 160 struct drm_framebuffer *fb)
121{ 161{
122 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 162 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
123 struct mdp5_kms *mdp5_kms = get_kms(plane); 163 struct mdp5_kms *mdp5_kms = get_kms(plane);
124 enum mdp5_pipe pipe = mdp5_plane->pipe;
125 uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
126 uint32_t iova[4];
127 int i;
128
129 for (i = 0; i < nplanes; i++) {
130 struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
131 msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
132 }
133 for (; i < 4; i++)
134 iova[i] = 0;
135 164
136 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
137 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
138 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
139
140 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
141 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
142 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
143
144 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
145 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
146 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
147 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
148
149 plane->fb = fb;
165 DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
166 return msm_framebuffer_prepare(fb, mdp5_kms->id);
150} 167}
151 168
152/* NOTE: looks like if horizontal decimation is used (if we supported that)
153 * then the width used to calculate SMP block requirements is the post-
154 * decimated width. Ie. SMP buffering sits downstream of decimation (which
155 * presumably happens during the dma from scanout buffer).
156 */
157static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
158 uint32_t nplanes, uint32_t width)
169static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
170 struct drm_framebuffer *fb)
159{ 171{
160 struct drm_device *dev = plane->dev;
161 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 172 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
162 struct mdp5_kms *mdp5_kms = get_kms(plane); 173 struct mdp5_kms *mdp5_kms = get_kms(plane);
163 enum mdp5_pipe pipe = mdp5_plane->pipe;
164 int i, hsub, nlines, nblks, ret;
165 174
166 hsub = drm_format_horz_chroma_subsampling(format);
175 DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
176 msm_framebuffer_cleanup(fb, mdp5_kms->id);
177}
167 178
168 /* different if BWC (compressed framebuffer?) enabled: */
169 nlines = 2;
179static int mdp5_plane_atomic_check(struct drm_plane *plane,
180 struct drm_plane_state *state)
181{
182 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
183 struct drm_plane_state *old_state = plane->state;
170 184
171 for (i = 0, nblks = 0; i < nplanes; i++) {
172 int n, fetch_stride, cpp;
185 DBG("%s: check (%d -> %d)", mdp5_plane->name,
186 plane_enabled(old_state), plane_enabled(state));
173 187
174 cpp = drm_format_plane_cpp(format, i);
175 fetch_stride = width * cpp / (i ? hsub : 1);
188 if (plane_enabled(state) && plane_enabled(old_state)) {
189 /* we cannot change SMP block configuration during scanout: */
190 bool full_modeset = false;
191 if (state->fb->pixel_format != old_state->fb->pixel_format) {
192 DBG("%s: pixel_format change!", mdp5_plane->name);
193 full_modeset = true;
194 }
195 if (state->src_w != old_state->src_w) {
196 DBG("%s: src_w change!", mdp5_plane->name);
197 full_modeset = true;
198 }
199 if (to_mdp5_plane_state(old_state)->pending) {
200 DBG("%s: still pending!", mdp5_plane->name);
201 full_modeset = true;
202 }
203 if (full_modeset) {
204 struct drm_crtc_state *crtc_state =
205 drm_atomic_get_crtc_state(state->state, state->crtc);
206 crtc_state->mode_changed = true;
207 to_mdp5_plane_state(state)->mode_changed = true;
208 }
209 } else {
210 to_mdp5_plane_state(state)->mode_changed = true;
211 }
176 212
177 n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
213 return 0;
214}
178 215
179 /* for hw rev v1.00 */
180 if (mdp5_kms->rev == 0)
181 n = roundup_pow_of_two(n);
216static void mdp5_plane_atomic_update(struct drm_plane *plane,
217 struct drm_plane_state *old_state)
218{
219 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
220 struct drm_plane_state *state = plane->state;
182 221
183 DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
184 ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
185 if (ret) {
186 dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
187 n, ret);
188 return ret;
189 }
222 DBG("%s: update", mdp5_plane->name);
190 223
191 nblks += n;
224 if (!plane_enabled(state)) {
225 to_mdp5_plane_state(state)->pending = true;
226 mdp5_plane_disable(plane);
227 } else if (to_mdp5_plane_state(state)->mode_changed) {
228 int ret;
229 to_mdp5_plane_state(state)->pending = true;
230 ret = mdp5_plane_mode_set(plane,
231 state->crtc, state->fb,
232 state->crtc_x, state->crtc_y,
233 state->crtc_w, state->crtc_h,
234 state->src_x, state->src_y,
235 state->src_w, state->src_h);
236 /* atomic_check should have ensured that this doesn't fail */
237 WARN_ON(ret < 0);
238 } else {
239 unsigned long flags;
240 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
241 set_scanout_locked(plane, state->fb);
242 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
192 } 243 }
193
194 /* in success case, return total # of blocks allocated: */
195 return nblks;
196} 244}
197 245
198static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
246static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
247 .prepare_fb = mdp5_plane_prepare_fb,
248 .cleanup_fb = mdp5_plane_cleanup_fb,
249 .atomic_check = mdp5_plane_atomic_check,
250 .atomic_update = mdp5_plane_atomic_update,
251};
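As a rough guide, the atomic helpers invoke these hooks in the following order on each plane update (a summary of standard helper behaviour, not text from the patch):

	/* prepare_fb(new_fb)  - pin/map buffers; may fail, runs before hw access
	 * atomic_check(state) - validate the update, flag mode_changed if needed
	 * atomic_update()     - program the pipe registers; must not fail
	 * cleanup_fb(old_fb)  - unpin the outgoing framebuffer afterwards
	 */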
252
253static void set_scanout_locked(struct drm_plane *plane,
254 struct drm_framebuffer *fb)
199{ 255{
200 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 256 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
201 struct mdp5_kms *mdp5_kms = get_kms(plane); 257 struct mdp5_kms *mdp5_kms = get_kms(plane);
202 enum mdp5_pipe pipe = mdp5_plane->pipe; 258 enum mdp5_pipe pipe = mdp5_plane->pipe;
203 uint32_t val;
204 259
205 /* 1/4 of SMP pool that is being fetched */
206 val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
260 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
261 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
262 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
207 263
208 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
209 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
210 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
264 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
265 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
266 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
267
268 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
269 msm_framebuffer_iova(fb, mdp5_kms->id, 0));
270 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
271 msm_framebuffer_iova(fb, mdp5_kms->id, 1));
272 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
273 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
274 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
275 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
211 276
277 plane->fb = fb;
212} 278}
213 279
214int mdp5_plane_mode_set(struct drm_plane *plane,
280static int mdp5_plane_mode_set(struct drm_plane *plane,
215 struct drm_crtc *crtc, struct drm_framebuffer *fb, 281 struct drm_crtc *crtc, struct drm_framebuffer *fb,
216 int crtc_x, int crtc_y, 282 int crtc_x, int crtc_y,
217 unsigned int crtc_w, unsigned int crtc_h, 283 unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
225 uint32_t nplanes, config = 0; 291 uint32_t nplanes, config = 0;
226 uint32_t phasex_step = 0, phasey_step = 0; 292 uint32_t phasex_step = 0, phasey_step = 0;
227 uint32_t hdecm = 0, vdecm = 0; 293 uint32_t hdecm = 0, vdecm = 0;
228 int i, nblks;
294 unsigned long flags;
295 int ret;
229 296
230 nplanes = drm_format_num_planes(fb->pixel_format); 297 nplanes = drm_format_num_planes(fb->pixel_format);
231 298
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
243 fb->base.id, src_x, src_y, src_w, src_h, 310 fb->base.id, src_x, src_y, src_w, src_h,
244 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 311 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
245 312
246 /*
247 * Calculate and request required # of smp blocks:
248 */
249 nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
250 if (nblks < 0)
251 return nblks;
313 /* Request some memory from the SMP: */
314 ret = mdp5_smp_request(mdp5_kms->smp,
315 mdp5_plane->pipe, fb->pixel_format, src_w);
316 if (ret)
317 return ret;
252 318
253 /* 319 /*
254 * Currently we update the hw for allocations/requests immediately, 320 * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
256 * would move into atomic->check_plane_state(), while updating the 322 * would move into atomic->check_plane_state(), while updating the
257 * hw would remain here: 323 * hw would remain here:
258 */ 324 */
259 for (i = 0; i < pipe2nclients(pipe); i++)
260 mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
325 mdp5_smp_configure(mdp5_kms->smp, pipe);
261 326
262 if (src_w != crtc_w) { 327 if (src_w != crtc_w) {
263 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; 328 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
269 /* TODO calc phasey_step, vdecm */ 334 /* TODO calc phasey_step, vdecm */
270 } 335 }
271 336
337 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
338
272 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), 339 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
273 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | 340 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
274 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); 341 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
289 MDP5_PIPE_OUT_XY_X(crtc_x) | 356 MDP5_PIPE_OUT_XY_X(crtc_x) |
290 MDP5_PIPE_OUT_XY_Y(crtc_y)); 357 MDP5_PIPE_OUT_XY_Y(crtc_y));
291 358
292 mdp5_plane_set_scanout(plane, fb);
293
294 format = to_mdp_format(msm_framebuffer_format(fb)); 359 format = to_mdp_format(msm_framebuffer_format(fb));
295 360
296 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), 361 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
330 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | 395 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
331 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); 396 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
332 397
333 set_fifo_thresholds(plane, nblks);
398 set_scanout_locked(plane, fb);
334 399
335 /* TODO detach from old crtc (if we had more than one) */
336 mdp5_crtc_attach(crtc, plane);
400 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
337 401
338 return 0;
402 return ret;
339} 403}
340 404
341void mdp5_plane_complete_flip(struct drm_plane *plane) 405void mdp5_plane_complete_flip(struct drm_plane *plane)
342{ 406{
343 struct mdp5_kms *mdp5_kms = get_kms(plane); 407 struct mdp5_kms *mdp5_kms = get_kms(plane);
344 enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
345 int i;
408 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
409 enum mdp5_pipe pipe = mdp5_plane->pipe;
410
411 DBG("%s: complete flip", mdp5_plane->name);
346 412
347 for (i = 0; i < pipe2nclients(pipe); i++)
348 mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
413 mdp5_smp_commit(mdp5_kms->smp, pipe);
414
415 to_mdp5_plane_state(plane->state)->pending = false;
349} 416}
350 417
351enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) 418enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
354 return mdp5_plane->pipe; 421 return mdp5_plane->pipe;
355} 422}
356 423
424uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
425{
426 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
427
428 return mdp5_plane->flush_mask;
429}
430
357/* initialize plane */ 431/* initialize plane */
358struct drm_plane *mdp5_plane_init(struct drm_device *dev, 432struct drm_plane *mdp5_plane_init(struct drm_device *dev,
359 enum mdp5_pipe pipe, bool private_plane) 433 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
360{ 434{
361 struct drm_plane *plane = NULL; 435 struct drm_plane *plane = NULL;
362 struct mdp5_plane *mdp5_plane; 436 struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
377 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, 451 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
378 ARRAY_SIZE(mdp5_plane->formats)); 452 ARRAY_SIZE(mdp5_plane->formats));
379 453
454 mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
455 mdp5_plane->reg_offset = reg_offset;
456 spin_lock_init(&mdp5_plane->pipe_lock);
457
380 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 458 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
381 drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
459 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
382 mdp5_plane->formats, mdp5_plane->nformats, 460 mdp5_plane->formats, mdp5_plane->nformats,
383 type); 461 type);
462 if (ret)
463 goto fail;
464
465 drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
384 466
385 mdp5_plane_install_properties(plane, &plane->base); 467 mdp5_plane_install_properties(plane, &plane->base);
386 468
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a6..bf551885e019 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -29,8 +30,11 @@
29 * Based on the size of the attached scanout buffer, a certain # of 30 * Based on the size of the attached scanout buffer, a certain # of
30 * blocks must be allocated to that client out of the shared pool. 31 * blocks must be allocated to that client out of the shared pool.
31 * 32 *
32 * For each block, it can be either free, or pending/in-use by a
33 * client. The updates happen in three steps:
33 * In some hw, some blocks are statically allocated for certain pipes
34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 *
36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps:
34 * 38 *
35 * 1) mdp5_smp_request(): 39 * 1) mdp5_smp_request():
36 * When plane scanout is setup, calculate required number of 40 * When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
61 * inuse and pending state of all clients.. 65 * inuse and pending state of all clients..
62 */ 66 */
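To make the three steps concrete, here is a minimal usage sketch written against the per-pipe API this patch introduces (illustrative only, error handling elided; mdp5_smp_request() can fail with -ENOSPC):

	static void example_smp_flow(struct mdp5_smp *smp, enum mdp5_pipe pipe,
			u32 fmt, u32 width)
	{
		/* 1) plane setup: reserve enough MMBs for the new scanout width */
		mdp5_smp_request(smp, pipe, fmt, width);

		/* 2) flush/commit path: program hw for union(pending, inuse) */
		mdp5_smp_configure(smp, pipe);

		/* 3) after vblank, once hw picked up the allocation: pending -> inuse */
		mdp5_smp_commit(smp, pipe);
	}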
63 67
64static DEFINE_SPINLOCK(smp_lock);
68struct mdp5_smp {
69 struct drm_device *dev;
70
71 int blk_cnt;
72 int blk_size;
73
74 spinlock_t state_lock;
75 mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
76
77 struct mdp5_client_smp_state client_state[CID_MAX];
78};
65 79
80static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{
83 struct msm_drm_private *priv = smp->dev->dev_private;
84
85 return to_mdp5_kms(to_mdp_kms(priv->kms));
86}
87
88static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
89{
90 WARN_ON(plane >= pipe2nclients(pipe));
91 switch (pipe) {
92 case SSPP_VIG0: return CID_VIG0_Y + plane;
93 case SSPP_VIG1: return CID_VIG1_Y + plane;
94 case SSPP_VIG2: return CID_VIG2_Y + plane;
95 case SSPP_RGB0: return CID_RGB0;
96 case SSPP_RGB1: return CID_RGB1;
97 case SSPP_RGB2: return CID_RGB2;
98 case SSPP_DMA0: return CID_DMA0_Y + plane;
99 case SSPP_DMA1: return CID_DMA1_Y + plane;
100 case SSPP_VIG3: return CID_VIG3_Y + plane;
101 case SSPP_RGB3: return CID_RGB3;
102 default: return CID_UNUSED;
103 }
104}
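A quick illustration of the mapping above (assuming the usual three Y/Cb/Cr clients on VIG pipes):

	/* pipe2client(SSPP_VIG0, 0) -> CID_VIG0_Y, and planes 1/2 select the
	 * adjacent chroma client ids; for single-client RGB pipes the 'plane'
	 * argument is ignored, e.g. pipe2client(SSPP_RGB1, 0) -> CID_RGB1.
	 */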
66 105
67/* step #1: update # of blocks pending for the client: */ 106/* step #1: update # of blocks pending for the client: */
68int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
107static int smp_request_block(struct mdp5_smp *smp,
69 enum mdp5_client_id cid, int nblks) 108 enum mdp5_client_id cid, int nblks)
70{ 109{
71 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
72 int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
110 struct mdp5_kms *mdp5_kms = get_kms(smp);
111 const struct mdp5_cfg_hw *hw_cfg;
112 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
113 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
114 int reserved;
73 unsigned long flags; 115 unsigned long flags;
74 116
75 spin_lock_irqsave(&smp_lock, flags);
117 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
118 reserved = hw_cfg->smp.reserved[cid];
119
120 spin_lock_irqsave(&smp->state_lock, flags);
76 121
77 avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
122 nblks -= reserved;
123 if (reserved)
124 DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
125
126 avail = cnt - bitmap_weight(smp->state, cnt);
78 if (nblks > avail) { 127 if (nblks > avail) {
128 dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
129 nblks, avail);
79 ret = -ENOSPC; 130 ret = -ENOSPC;
80 goto fail; 131 goto fail;
81 } 132 }
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
84 if (nblks > cur_nblks) { 135 if (nblks > cur_nblks) {
85 /* grow the existing pending reservation: */ 136 /* grow the existing pending reservation: */
86 for (i = cur_nblks; i < nblks; i++) { 137 for (i = cur_nblks; i < nblks; i++) {
87 int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt); 138 int blk = find_first_zero_bit(smp->state, cnt);
88 set_bit(blk, ps->pending); 139 set_bit(blk, ps->pending);
89 set_bit(blk, mdp5_kms->smp_state); 140 set_bit(blk, smp->state);
90 } 141 }
91 } else { 142 } else {
92 /* shrink the existing pending reservation: */ 143 /* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
98 } 149 }
99 150
100fail: 151fail:
101 spin_unlock_irqrestore(&smp_lock, flags);
152 spin_unlock_irqrestore(&smp->state_lock, flags);
153 return 0;
154}
155
156static void set_fifo_thresholds(struct mdp5_smp *smp,
157 enum mdp5_pipe pipe, int nblks)
158{
159 struct mdp5_kms *mdp5_kms = get_kms(smp);
160 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
161 u32 val;
162
163 /* 1/4 of SMP pool that is being fetched */
164 val = (nblks * smp_entries_per_blk) / 4;
165
166 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
167 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
168 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
169}
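For a sense of the numbers here (an assumed configuration, not taken from the patch):

	/* With 4 MMBs of 4096 bytes and 128-bit entries: smp_entries_per_blk =
	 * 4096 / 16 = 256, so val = (4 * 256) / 4 = 256 and the WM_0/1/2
	 * watermarks are written as 256/512/768, i.e. at 1/4, 2/4 and 3/4 of
	 * the pipe's share of the pool.
	 */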
170
171/*
172 * NOTE: looks like if horizontal decimation is used (if we supported that)
173 * then the width used to calculate SMP block requirements is the post-
174 * decimated width. Ie. SMP buffering sits downstream of decimation (which
175 * presumably happens during the dma from scanout buffer).
176 */
177int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
178{
179 struct mdp5_kms *mdp5_kms = get_kms(smp);
180 struct drm_device *dev = mdp5_kms->dev;
181 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
182 int i, hsub, nplanes, nlines, nblks, ret;
183
184 nplanes = drm_format_num_planes(fmt);
185 hsub = drm_format_horz_chroma_subsampling(fmt);
186
187 /* different if BWC (compressed framebuffer?) enabled: */
188 nlines = 2;
189
190 for (i = 0, nblks = 0; i < nplanes; i++) {
191 int n, fetch_stride, cpp;
192
193 cpp = drm_format_plane_cpp(fmt, i);
194 fetch_stride = width * cpp / (i ? hsub : 1);
195
196 n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
197
198 /* for hw rev v1.00 */
199 if (rev == 0)
200 n = roundup_pow_of_two(n);
201
202 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
203 ret = smp_request_block(smp, pipe2client(pipe, i), n);
204 if (ret) {
205 dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
206 n, ret);
207 return ret;
208 }
209
210 nblks += n;
211 }
212
213 set_fifo_thresholds(smp, pipe, nblks);
214
102 return 0; 215 return 0;
103} 216}
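A worked example of the request math above (assumed scanout parameters, for illustration):

	/* For a 1366-wide XRGB8888 scanout with 4096-byte MMBs: cpp = 4, so
	 * fetch_stride = 1366 * 4 = 5464 bytes; with nlines = 2 that is 10928
	 * bytes, i.e. DIV_ROUND_UP(10928, 4096) = 3 MMBs, which v1.00 hardware
	 * rounds up to 4 via roundup_pow_of_two().
	 */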
104 217
105static void update_smp_state(struct mdp5_kms *mdp5_kms,
218/* Release SMP blocks for all clients of the pipe */
219void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
220{
221 int i, nblks;
222
223 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
224 smp_request_block(smp, pipe2client(pipe, i), 0);
225 set_fifo_thresholds(smp, pipe, 0);
226}
227
228static void update_smp_state(struct mdp5_smp *smp,
106 enum mdp5_client_id cid, mdp5_smp_state_t *assigned) 229 enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
107{ 230{
108 int cnt = mdp5_kms->smp_blk_cnt;
109 uint32_t blk, val;
231 struct mdp5_kms *mdp5_kms = get_kms(smp);
232 int cnt = smp->blk_cnt;
233 u32 blk, val;
110 234
111 for_each_set_bit(blk, *assigned, cnt) { 235 for_each_set_bit(blk, *assigned, cnt) {
112 int idx = blk / 3; 236 int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
135} 259}
136 260
137/* step #2: configure hw for union(pending, inuse): */ 261/* step #2: configure hw for union(pending, inuse): */
138void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
262void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
139{ 263{
140 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
141 int cnt = mdp5_kms->smp_blk_cnt;
264 int cnt = smp->blk_cnt;
142 mdp5_smp_state_t assigned; 265 mdp5_smp_state_t assigned;
266 int i;
143 267
144 bitmap_or(assigned, ps->inuse, ps->pending, cnt);
145 update_smp_state(mdp5_kms, cid, &assigned);
268 for (i = 0; i < pipe2nclients(pipe); i++) {
269 enum mdp5_client_id cid = pipe2client(pipe, i);
270 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
271
272 bitmap_or(assigned, ps->inuse, ps->pending, cnt);
273 update_smp_state(smp, cid, &assigned);
274 }
146} 275}
147 276
148/* step #3: after vblank, copy pending -> inuse: */ 277/* step #3: after vblank, copy pending -> inuse: */
149void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
278void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
150{ 279{
151 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
152 int cnt = mdp5_kms->smp_blk_cnt;
280 int cnt = smp->blk_cnt;
153 mdp5_smp_state_t released; 281 mdp5_smp_state_t released;
282 int i;
283
284 for (i = 0; i < pipe2nclients(pipe); i++) {
285 enum mdp5_client_id cid = pipe2client(pipe, i);
286 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
287
288 /*
289 * Figure out if there are any blocks we were previously
290 * using, which can be released and made available to other
291 * clients:
292 */
293 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
294 unsigned long flags;
295
296 spin_lock_irqsave(&smp->state_lock, flags);
297 /* clear released blocks: */
298 bitmap_andnot(smp->state, smp->state, released, cnt);
299 spin_unlock_irqrestore(&smp->state_lock, flags);
154 300
155 /*
156 * Figure out if there are any blocks we where previously
157 * using, which can be released and made available to other
158 * clients:
159 */
160 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
161 unsigned long flags;
162
163 spin_lock_irqsave(&smp_lock, flags);
164 /* clear released blocks: */
165 bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
166 released, cnt);
167 spin_unlock_irqrestore(&smp_lock, flags);
168
169 update_smp_state(mdp5_kms, CID_UNUSED, &released);
301 update_smp_state(smp, CID_UNUSED, &released);
302 }
303
304 bitmap_copy(ps->inuse, ps->pending, cnt);
170 } 305 }
306}
307
308void mdp5_smp_destroy(struct mdp5_smp *smp)
309{
310 kfree(smp);
311}
312
313struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
314{
315 struct mdp5_smp *smp = NULL;
316 int ret;
317
318 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
319 if (unlikely(!smp)) {
320 ret = -ENOMEM;
321 goto fail;
322 }
323
324 smp->dev = dev;
325 smp->blk_cnt = cfg->mmb_count;
326 smp->blk_size = cfg->mmb_size;
327
328 /* statically tied MMBs cannot be re-allocated: */
329 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
330 spin_lock_init(&smp->state_lock);
331
332 return smp;
333fail:
334 if (smp)
335 mdp5_smp_destroy(smp);
171 336
172 bitmap_copy(ps->inuse, ps->pending, cnt);
337 return ERR_PTR(ret);
173} 338}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -20,22 +21,26 @@
20 21
21#include "msm_drv.h" 22#include "msm_drv.h"
22 23
23#define MAX_SMP_BLOCKS 22
24#define SMP_BLK_SIZE 4096
25#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
26
27typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
28
29struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
30 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
31 mdp5_smp_state_t pending; 26 mdp5_smp_state_t pending;
32}; 27};
33 28
34struct mdp5_kms; 29struct mdp5_kms;
30struct mdp5_smp;
31
32/*
33 * SMP module prototypes:
34 * mdp5_smp_init() returns an SMP @handler,
35 * which is then used to call the other mdp5_smp_*(handler, ...) functions.
36 */
35 37
36int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
37void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
38void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
38struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
39void mdp5_smp_destroy(struct mdp5_smp *smp);
39 40
41int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
42void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
43void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
44void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
40 45
41#endif /* __MDP5_SMP_H__ */ 46#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 000000000000..f0de412e13dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_kms.h"
20#include "msm_gem.h"
21
22struct msm_commit {
23 struct drm_atomic_state *state;
24 uint32_t fence;
25 struct msm_fence_cb fence_cb;
26};
27
28static void fence_cb(struct msm_fence_cb *cb);
29
30static struct msm_commit *new_commit(struct drm_atomic_state *state)
31{
32 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
33
34 if (!c)
35 return NULL;
36
37 c->state = state;
38 /* TODO we might need a way to indicate to run the cb on a
39 * different wq so wait_for_vblanks() doesn't block retiring
40 * bo's..
41 */
42 INIT_FENCE_CB(&c->fence_cb, fence_cb);
43
44 return c;
45}
46
47/* The (potentially) asynchronous part of the commit. At this point
48 * nothing can fail short of armageddon.
49 */
50static void complete_commit(struct msm_commit *c)
51{
52 struct drm_atomic_state *state = c->state;
53 struct drm_device *dev = state->dev;
54
55 drm_atomic_helper_commit_pre_planes(dev, state);
56
57 drm_atomic_helper_commit_planes(dev, state);
58
59 drm_atomic_helper_commit_post_planes(dev, state);
60
61 drm_atomic_helper_wait_for_vblanks(dev, state);
62
63 drm_atomic_helper_cleanup_planes(dev, state);
64
65 drm_atomic_state_free(state);
66
67 kfree(c);
68}
69
70static void fence_cb(struct msm_fence_cb *cb)
71{
72 struct msm_commit *c =
73 container_of(cb, struct msm_commit, fence_cb);
74 complete_commit(c);
75}
76
77static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
78{
79 struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
80 c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
81}
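The effect of the max() above, traced for assumed fence numbers:

	/* If a commit touches two fbs whose bos were last written at fences 12
	 * and 15, c->fence ends up as 15 and completion is deferred until that
	 * fence retires; this relies on fence numbers increasing monotonically.
	 */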
82
83
84/**
85 * drm_atomic_helper_commit - commit validated state object
86 * @dev: DRM device
87 * @state: the driver state object
88 * @async: asynchronous commit
89 *
90 * This function commits a state object that has been pre-validated with
91 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. For
92 * now this doesn't implement asynchronous commits.
93 *
94 * RETURNS
95 * Zero for success or -errno.
96 */
97int msm_atomic_commit(struct drm_device *dev,
98 struct drm_atomic_state *state, bool async)
99{
100 struct msm_commit *c;
101 int nplanes = dev->mode_config.num_total_plane;
102 int i, ret;
103
104 ret = drm_atomic_helper_prepare_planes(dev, state);
105 if (ret)
106 return ret;
107
108 c = new_commit(state);
109
110 /*
111 * Figure out what fence to wait for:
112 */
113 for (i = 0; i < nplanes; i++) {
114 struct drm_plane *plane = state->planes[i];
115 struct drm_plane_state *new_state = state->plane_states[i];
116
117 if (!plane)
118 continue;
119
120 if ((plane->state->fb != new_state->fb) && new_state->fb)
121 add_fb(c, new_state->fb);
122 }
123
124 /*
125 * This is the point of no return - everything below never fails except
126 * when the hw goes bonghits. Which means we can commit the new state on
127 * the software side now.
128 */
129
130 drm_atomic_helper_swap_state(dev, state);
131
132 /*
133 * Everything below can be run asynchronously without the need to grab
134 * any modeset locks at all under one condition: it must be guaranteed
135 * that the asynchronous work has either been cancelled (if the driver
136 * supports it, which at least requires that the framebuffers get
137 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
138 * before the new state gets committed on the software side with
139 * drm_atomic_helper_swap_state().
140 *
141 * This scheme allows new atomic state updates to be prepared and
142 * checked in parallel to the asynchronous completion of the previous
143 * update. Which is important since compositors need to figure out the
144 * composition of the next frame right after having submitted the
145 * current layout.
146 */
147
148 if (async) {
149 msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
150 return 0;
151 }
152
153 ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
154 if (ret) {
155 WARN_ON(ret); // TODO unswap state back? or??
156 kfree(c);
157 return ret;
158 }
159
160 complete_commit(c);
161
162 return 0;
163}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 42e1c48eef28..c795217e1bfc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
29static const struct drm_mode_config_funcs mode_config_funcs = { 29static const struct drm_mode_config_funcs mode_config_funcs = {
30 .fb_create = msm_framebuffer_create, 30 .fb_create = msm_framebuffer_create,
31 .output_poll_changed = msm_fb_output_poll_changed, 31 .output_poll_changed = msm_fb_output_poll_changed,
32 .atomic_check = drm_atomic_helper_check,
33 .atomic_commit = msm_atomic_commit,
32}; 34};
33 35
34int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) 36int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
294 goto fail; 296 goto fail;
295 } 297 }
296 298
299 drm_mode_config_reset(dev);
300
297#ifdef CONFIG_DRM_MSM_FBDEV 301#ifdef CONFIG_DRM_MSM_FBDEV
298 priv->fbdev = msm_fbdev_init(dev); 302 priv->fbdev = msm_fbdev_init(dev);
299#endif 303#endif
@@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
619 return ret; 623 return ret;
620} 624}
621 625
626int msm_queue_fence_cb(struct drm_device *dev,
627 struct msm_fence_cb *cb, uint32_t fence)
628{
629 struct msm_drm_private *priv = dev->dev_private;
630 int ret = 0;
631
632 mutex_lock(&dev->struct_mutex);
633 if (!list_empty(&cb->work.entry)) {
634 ret = -EINVAL;
635 } else if (fence > priv->completed_fence) {
636 cb->fence = fence;
637 list_add_tail(&cb->work.entry, &priv->fence_cbs);
638 } else {
639 queue_work(priv->wq, &cb->work);
640 }
641 mutex_unlock(&dev->struct_mutex);
642
643 return ret;
644}
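Behaviour sketch for the helper above, with assumed fence values:

	/* With priv->completed_fence == 7:
	 *   msm_queue_fence_cb(dev, cb, 9) parks cb on the fence_cbs list;
	 *   msm_queue_fence_cb(dev, cb, 5) dispatches cb via the wq right away;
	 *   re-queueing a cb whose work entry is still pending returns -EINVAL.
	 */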
645
622/* called from workqueue */ 646/* called from workqueue */
623void msm_update_fence(struct drm_device *dev, uint32_t fence) 647void msm_update_fence(struct drm_device *dev, uint32_t fence)
624{ 648{
@@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
832 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, 856 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
833 .gem_prime_vmap = msm_gem_prime_vmap, 857 .gem_prime_vmap = msm_gem_prime_vmap,
834 .gem_prime_vunmap = msm_gem_prime_vunmap, 858 .gem_prime_vunmap = msm_gem_prime_vunmap,
859 .gem_prime_mmap = msm_gem_prime_mmap,
835#ifdef CONFIG_DEBUG_FS 860#ifdef CONFIG_DEBUG_FS
836 .debugfs_init = msm_debugfs_init, 861 .debugfs_init = msm_debugfs_init,
837 .debugfs_cleanup = msm_debugfs_cleanup, 862 .debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 67f9d0a2332c..136303818436 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,15 +32,6 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <asm/sizes.h> 33#include <asm/sizes.h>
34 34
35
36#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
37/* stubs we need for compile-test: */
38static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
39{
40 return NULL;
41}
42#endif
43
44#ifndef CONFIG_OF 35#ifndef CONFIG_OF
45#include <mach/board.h> 36#include <mach/board.h>
46#include <mach/socinfo.h> 37#include <mach/socinfo.h>
@@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
48#endif 39#endif
49 40
50#include <drm/drmP.h> 41#include <drm/drmP.h>
42#include <drm/drm_atomic.h>
43#include <drm/drm_atomic_helper.h>
51#include <drm/drm_crtc_helper.h> 44#include <drm/drm_crtc_helper.h>
45#include <drm/drm_plane_helper.h>
52#include <drm/drm_fb_helper.h> 46#include <drm/drm_fb_helper.h>
53#include <drm/msm_drm.h> 47#include <drm/msm_drm.h>
54#include <drm/drm_gem.h> 48#include <drm/drm_gem.h>
@@ -75,7 +69,12 @@ struct msm_drm_private {
75 struct msm_kms *kms; 69 struct msm_kms *kms;
76 70
77 /* subordinate devices, if present: */ 71 /* subordinate devices, if present: */
78 struct platform_device *hdmi_pdev, *gpu_pdev;
72 struct platform_device *gpu_pdev;
73
74 /* possibly this should be in the kms component, but it is
75 * shared by both mdp4 and mdp5..
76 */
77 struct hdmi *hdmi;
79 78
80 /* when we have more than one 'msm_gpu' these need to be an array: */ 79 /* when we have more than one 'msm_gpu' these need to be an array: */
81 struct msm_gpu *gpu; 80 struct msm_gpu *gpu;
@@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
145 (_cb)->func = _func; \ 144 (_cb)->func = _func; \
146 } while (0) 145 } while (0)
147 146
147int msm_atomic_commit(struct drm_device *dev,
148 struct drm_atomic_state *state, bool async);
149
148int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 150int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
149 151
150int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 152int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
151 struct timespec *timeout); 153 struct timespec *timeout);
154int msm_queue_fence_cb(struct drm_device *dev,
155 struct msm_fence_cb *cb, uint32_t fence);
152void msm_update_fence(struct drm_device *dev, uint32_t fence); 156void msm_update_fence(struct drm_device *dev, uint32_t fence);
153 157
154int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 158int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
155 struct drm_file *file); 159 struct drm_file *file);
156 160
161int msm_gem_mmap_obj(struct drm_gem_object *obj,
162 struct vm_area_struct *vma);
157int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 163int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
158int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 164int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
159uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); 165uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
160int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, 166int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
161 uint32_t *iova); 167 uint32_t *iova);
162int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); 168int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
169uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
163struct page **msm_gem_get_pages(struct drm_gem_object *obj); 170struct page **msm_gem_get_pages(struct drm_gem_object *obj);
164void msm_gem_put_pages(struct drm_gem_object *obj); 171void msm_gem_put_pages(struct drm_gem_object *obj);
165void msm_gem_put_iova(struct drm_gem_object *obj, int id); 172void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
170struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); 177struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
171void *msm_gem_prime_vmap(struct drm_gem_object *obj); 178void *msm_gem_prime_vmap(struct drm_gem_object *obj);
172void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 179void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
180int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
173struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 181struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
174 struct dma_buf_attachment *attach, struct sg_table *sg); 182 struct dma_buf_attachment *attach, struct sg_table *sg);
175int msm_gem_prime_pin(struct drm_gem_object *obj); 183int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
192struct drm_gem_object *msm_gem_import(struct drm_device *dev, 200struct drm_gem_object *msm_gem_import(struct drm_device *dev,
193 uint32_t size, struct sg_table *sgt); 201 uint32_t size, struct sg_table *sgt);
194 202
203int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
204void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
205uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
195struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 206struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
196const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 207const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
197struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, 208struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -202,8 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
202struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); 213struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
203 214
204struct hdmi; 215struct hdmi;
205struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
206irqreturn_t hdmi_irq(int irq, void *dev_id);
216int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
217 struct drm_encoder *encoder);
207void __init hdmi_register(void); 218void __init hdmi_register(void);
208void __exit hdmi_unregister(void); 219void __exit hdmi_unregister(void);
209 220
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 81bafdf19ab3..84dec161d836 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -24,7 +24,7 @@
24struct msm_framebuffer { 24struct msm_framebuffer {
25 struct drm_framebuffer base; 25 struct drm_framebuffer base;
26 const struct msm_format *format; 26 const struct msm_format *format;
27 struct drm_gem_object *planes[2];
27 struct drm_gem_object *planes[3];
28}; 28};
29#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) 29#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
30 30
@@ -87,6 +87,44 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
87} 87}
88#endif 88#endif
89 89
90/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
91 * to prepare an fb for multiple different initiator 'id's. But that
92 * should be fine, since only the scanout (mdpN) side of things needs
93 * this, the gpu doesn't care about fb's.
94 */
95int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 int ret, i, n = drm_format_num_planes(fb->pixel_format);
99 uint32_t iova;
100
101 for (i = 0; i < n; i++) {
102 ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
103 DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
104 if (ret)
105 return ret;
106 }
107
108 return 0;
109}
110
111void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
112{
113 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
114 int i, n = drm_format_num_planes(fb->pixel_format);
115
116 for (i = 0; i < n; i++)
117 msm_gem_put_iova(msm_fb->planes[i], id);
118}
119
120uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
121{
122 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
123 if (!msm_fb->planes[plane])
124 return 0;
125 return msm_gem_iova(msm_fb->planes[plane], id);
126}
127
90struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) 128struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
91{ 129{
92 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 130 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@@ -166,6 +204,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
166 204
167 msm_fb->format = format; 205 msm_fb->format = format;
168 206
207 if (n > ARRAY_SIZE(msm_fb->planes)) {
208 ret = -EINVAL;
209 goto fail;
210 }
211
169 for (i = 0; i < n; i++) { 212 for (i = 0; i < n; i++) {
170 unsigned int width = mode_cmd->width / (i ? hsub : 1); 213 unsigned int width = mode_cmd->width / (i ? hsub : 1);
171 unsigned int height = mode_cmd->height / (i ? vsub : 1); 214 unsigned int height = mode_cmd->height / (i ? vsub : 1);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ab5bfd2d0ebf..94d55e526b4e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
93 uint32_t paddr; 93 uint32_t paddr;
94 int ret, size; 94 int ret, size;
95 95
96 sizes->surface_bpp = 32;
97 sizes->surface_depth = 24;
98
99 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 96 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
100 sizes->surface_height, sizes->surface_bpp, 97 sizes->surface_height, sizes->surface_bpp,
101 sizes->fb_width, sizes->fb_height); 98 sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4b1b82adabde..4a6f0e49d5b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
309 return ret; 309 return ret;
310} 310}
311 311
312/* get iova, taking a reference. Should have a matching put */
312int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) 313int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
313{ 314{
314 struct msm_gem_object *msm_obj = to_msm_bo(obj); 315 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
328 return ret; 329 return ret;
329} 330}
330 331
332/* get iova without taking a reference, used in places where you have
333 * already done a 'msm_gem_get_iova()'.
334 */
335uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
336{
337 struct msm_gem_object *msm_obj = to_msm_bo(obj);
338 WARN_ON(!msm_obj->domain[id].iova);
339 return msm_obj->domain[id].iova;
340}
341
331void msm_gem_put_iova(struct drm_gem_object *obj, int id) 342void msm_gem_put_iova(struct drm_gem_object *obj, int id)
332{ 343{
333 // XXX TODO .. 344 // XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
397int msm_gem_queue_inactive_cb(struct drm_gem_object *obj, 408int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
398 struct msm_fence_cb *cb) 409 struct msm_fence_cb *cb)
399{ 410{
400 struct drm_device *dev = obj->dev;
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gem_object *msm_obj = to_msm_bo(obj); 411 struct msm_gem_object *msm_obj = to_msm_bo(obj);
403 int ret = 0;
404
405 mutex_lock(&dev->struct_mutex);
406 if (!list_empty(&cb->work.entry)) {
407 ret = -EINVAL;
408 } else if (is_active(msm_obj)) {
409 cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
410 list_add_tail(&cb->work.entry, &priv->fence_cbs);
411 } else {
412 queue_work(priv->wq, &cb->work);
413 }
414 mutex_unlock(&dev->struct_mutex);
415
416 return ret;
412 uint32_t fence = msm_gem_fence(msm_obj,
413 MSM_PREP_READ | MSM_PREP_WRITE);
414 return msm_queue_fence_cb(obj->dev, cb, fence);
417} 415}
418 416
419void msm_gem_move_to_active(struct drm_gem_object *obj, 417void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
452 int ret = 0; 450 int ret = 0;
453 451
454 if (is_active(msm_obj)) { 452 if (is_active(msm_obj)) {
455 uint32_t fence = 0;
453 uint32_t fence = msm_gem_fence(msm_obj, op);
456 454
457 if (op & MSM_PREP_READ)
458 fence = msm_obj->write_fence;
459 if (op & MSM_PREP_WRITE)
460 fence = max(fence, msm_obj->read_fence);
461 if (op & MSM_PREP_NOSYNC) 455 if (op & MSM_PREP_NOSYNC)
462 timeout = NULL; 456 timeout = NULL;
463 457
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
525 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 519 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
526 struct msm_mmu *mmu = priv->mmus[id]; 520 struct msm_mmu *mmu = priv->mmus[id];
527 if (mmu && msm_obj->domain[id].iova) { 521 if (mmu && msm_obj->domain[id].iova) {
528 uint32_t offset = (uint32_t)mmap_offset(obj);
522 uint32_t offset = msm_obj->domain[id].iova;
529 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); 523 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
530 } 524 }
531 } 525 }
532 526
533 drm_gem_free_mmap_offset(obj);
534
535 if (obj->import_attach) { 527 if (obj->import_attach) {
536 if (msm_obj->vaddr) 528 if (msm_obj->vaddr)
537 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); 529 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index bfb052688f8e..8fbbd0594c46 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
70 return msm_obj->gpu != NULL; 70 return msm_obj->gpu != NULL;
71} 71}
72 72
73static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
74 uint32_t op)
75{
76 uint32_t fence = 0;
77
78 if (op & MSM_PREP_READ)
79 fence = msm_obj->write_fence;
80 if (op & MSM_PREP_WRITE)
81 fence = max(fence, msm_obj->read_fence);
82
83 return fence;
84}
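The fence selection above, traced for assumed values (bo last read at fence 8, last written at fence 10):

	/* MSM_PREP_READ                 -> fence = write_fence = 10 (a reader
	 *                                  only waits for the last writer);
	 * MSM_PREP_WRITE                -> fence = read_fence = 8 (a writer
	 *                                  must wait for outstanding readers);
	 * MSM_PREP_READ | MSM_PREP_WRITE -> max(10, 8) = 10.
	 */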
85
73#define MAX_CMDS 4 86#define MAX_CMDS 4
74 87
75/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 88/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ad772fe36115..dd7a7ab603e2 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
37 /* TODO msm_gem_vunmap() */ 37 /* TODO msm_gem_vunmap() */
38} 38}
39 39
40int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
41{
42 int ret;
43
44 mutex_lock(&obj->dev->struct_mutex);
45 ret = drm_gem_mmap_obj(obj, obj->size, vma);
46 mutex_unlock(&obj->dev->struct_mutex);
47 if (ret < 0)
48 return ret;
49
50 return msm_gem_mmap_obj(vma->vm_private_data, vma);
51}
52
40struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 53struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
41 struct dma_buf_attachment *attach, struct sg_table *sg) 54 struct dma_buf_attachment *attach, struct sg_table *sg)
42{ 55{