author    Dave Airlie <airlied@redhat.com>    2013-08-28 00:27:15 -0400
committer Dave Airlie <airlied@redhat.com>    2013-08-28 00:27:15 -0400
commit    e906d7bdd3b63ffac8b91f2f05c450775de95ef6 (patch)
tree      014e1f6a07067111d4b3ba94e3f9229a1e70e6d1
parent    acb4652703f0a452405a3ab9319594eddc41391b (diff)
parent    bd6f82d8289422f618b98451a43887f452b3423e (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~robclark/linux into drm-next
Merge the MSM driver from Rob Clark

* 'drm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: add basic hangcheck/recovery mechanism
  drm/msm: add a3xx gpu support
  drm/msm: add register definitions for gpu
  drm/msm: basic KMS driver for snapdragon
  drm/msm: add register definitions
-rw-r--r--  drivers/gpu/drm/Kconfig  2
-rw-r--r--  drivers/gpu/drm/Makefile  1
-rw-r--r--  drivers/gpu/drm/msm/Kconfig  34
-rw-r--r--  drivers/gpu/drm/msm/Makefile  30
-rw-r--r--  drivers/gpu/drm/msm/NOTES  69
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h  1438
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h  2193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c  502
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h  30
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h  432
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  370
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  141
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h  254
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h  502
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h  114
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h  48
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c  235
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h  112
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h  508
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c  461
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_i2c.c  281
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c  141
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c  214
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h  50
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4.xml.h  1061
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_crtc.c  684
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c  317
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_format.c  56
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_irq.c  203
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.c  368
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.h  194
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_plane.c  243
-rw-r--r--  drivers/gpu/drm/msm/msm_connector.c  34
-rw-r--r--  drivers/gpu/drm/msm/msm_connector.h  68
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  776
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  211
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c  202
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c  258
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  597
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h  99
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  412
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  463
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  124
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c  61
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h  43
-rw-r--r--  include/uapi/drm/Kbuild  1
-rw-r--r--  include/uapi/drm/msm_drm.h  207
47 files changed, 14844 insertions, 0 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 626bc0cb1046..39573c5f7518 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -223,3 +223,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
223source "drivers/gpu/drm/tilcdc/Kconfig" 223source "drivers/gpu/drm/tilcdc/Kconfig"
224 224
225source "drivers/gpu/drm/qxl/Kconfig" 225source "drivers/gpu/drm/qxl/Kconfig"
226
227source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7b2343a2f5eb..f089adfe70ee 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC) += tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_MSM) += msm/
 obj-y += i2c/
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000000..a06c19cc56f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
1
2config DRM_MSM
3 tristate "MSM DRM"
4 depends on DRM
5 depends on ARCH_MSM
6 depends on ARCH_MSM8960
7 select DRM_KMS_HELPER
8 select SHMEM
9 select TMPFS
10 default y
11 help
12 DRM/KMS driver for MSM/snapdragon.
13
14config DRM_MSM_FBDEV
15 bool "Enable legacy fbdev support for MSM modesetting driver"
16 depends on DRM_MSM
17 select FB_SYS_FILLRECT
18 select FB_SYS_COPYAREA
19 select FB_SYS_IMAGEBLIT
20 select FB_SYS_FOPS
21 default y
22 help
23 Choose this option if you have a need for the legacy fbdev
24 support. Note that this support also provides the Linux console
25 support on top of the MSM modesetting driver.
26
27config DRM_MSM_REGISTER_LOGGING
28 bool "MSM DRM register logging"
29 depends on DRM_MSM
30 default n
31 help
32 Compile in support for logging register reads/writes in a format
33 that can be parsed by the envytools demsm tool. If enabled, register
34 logging can be switched on via msm.reglog=y module param.
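
The msm.reglog=y switch mentioned in the help text above is an ordinary
boolean module parameter. As a rough sketch (illustrative only; the actual
declaration, permissions and log format used in msm_drv.c may differ),
wiring such a parameter up and consulting it in a logged register write
looks something like this:

#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool reglog;	/* off unless loaded with msm.reglog=y */
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);

/* write a register, optionally logging it in a parseable format */
static inline void msm_writel_logged(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}
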
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000000..439dfb5b417b
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
3 ccflags-y += -Werror
4endif
5
6msm-y := \
7 adreno/adreno_gpu.o \
8 adreno/a3xx_gpu.o \
9 hdmi/hdmi.o \
10 hdmi/hdmi_connector.o \
11 hdmi/hdmi_i2c.o \
12 hdmi/hdmi_phy_8960.o \
13 hdmi/hdmi_phy_8x60.o \
14 mdp4/mdp4_crtc.o \
15 mdp4/mdp4_dtv_encoder.o \
16 mdp4/mdp4_format.o \
17 mdp4/mdp4_irq.o \
18 mdp4/mdp4_kms.o \
19 mdp4/mdp4_plane.o \
20 msm_connector.o \
21 msm_drv.o \
22 msm_fb.o \
23 msm_gem.o \
24 msm_gem_submit.o \
25 msm_gpu.o \
26 msm_ringbuffer.o
27
28msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
29
30obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000000..e036f6c1db94
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
1NOTES about msm drm/kms driver:
2
3In the current snapdragon SoCs, we have (at least) 3 different
4display controller blocks at play:
5 + MDP3 - ?? seems to be what is on geeksphone peak device
6 + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
7 + MDSS - snapdragon 800
8
9(I don't have a completely clear picture on which display controller
10maps to which part #)
11
12Plus a handful of blocks around them for HDMI/DSI/etc output.
13
14And on gpu side of things:
15 + zero, one, or two 2d cores (z180)
16 + and either a2xx or a3xx 3d core.
17
18But, HDMI/DSI/etc blocks seem like they can be shared across multiple
19display controller blocks. And I for sure don't want to have to deal
20with N different kms devices from xf86-video-freedreno. Plus, it
21seems like we can do some clever tricks like use GPU to trigger
22pageflip after rendering completes (ie. have the kms/crtc code build
23up gpu cmdstream to update scanout and write FLUSH register after).
24
25So, the approach is one drm driver, with some modularity. Different
26'struct msm_kms' implementations, depending on display controller.
27And one or more 'struct msm_gpu' for the various different gpu sub-
28modules.
29
30(The second part is not implemented yet. So far this is just a basic KMS
31driver, and not exposing any custom ioctls to userspace for now.)
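
To make that modularity concrete, the split described above amounts to a
small function table that each display controller backend fills in. A
minimal sketch (field names here are illustrative, not the actual
definition in msm_drv.h):

#include <linux/irqreturn.h>

struct msm_kms;
struct drm_crtc;

struct msm_kms_funcs {
	/* hw init, called once the drm device is set up */
	int (*hw_init)(struct msm_kms *kms);
	/* irq handling, forwarded from the drm_driver hooks */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*destroy)(struct msm_kms *kms);
};

struct msm_kms {
	const struct msm_kms_funcs *funcs;
};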
32
33The kms module provides the plane, crtc, and encoder objects, and
34loads whatever connectors are appropriate.
35
36For MDP4, the mapping is:
37
38 plane -> PIPE{RGBn,VGn} \
39 crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
40 encoder -> DTV/LCDC/DSI (within MDP4) /
41 connector -> HDMI/DSI/etc --> other device(s)
42
43Since the irqs that drm core mostly cares about are vblank/framedone,
44we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
45and treat the MDP4 block's irq as "the" irq, even though the connectors
46may have their own irqs which they install themselves. For this reason
47the display controller is the "master" device.
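
In code terms that likely means the drm_driver irq hooks just forward to
whichever kms backend was loaded; a sketch, reusing the illustrative
msm_kms table from above (msm_drm_private is likewise a stand-in name):

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	return kms->funcs->irq(kms);
}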
48
49Each connector probably ends up being a separate device, just for the
50logistics of finding/mapping io region, irq, etc. Ideally we would
51have a better way than just stashing the platform device in a global
52(ie. like DT super-node.. but I don't have any snapdragon hw yet that
53is using DT).
54
55Note that so far I've not been able to get any docs on the hw, and it
56seems that access to such docs would prevent me from working on the
57freedreno gallium driver. So there may be some mistakes in register
58names (I had to invent a few, since no sufficient hint was given in
59the downstream android fbdev driver), bitfield sizes, etc. My current
60state of understanding the registers is given in the envytools rnndb
61files at:
62
63 https://github.com/freedreno/envytools/tree/master/rnndb
64 (the mdp4/hdmi/dsi directories)
65
66These files are used both for a parser tool (in the same tree) to
67parse logged register reads/writes (both from downstream android fbdev
68driver, and this driver with register logging enabled), as well as to
69generate the register level headers.
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000000..35463864b959
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
1#ifndef A2XX_XML
2#define A2XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a2xx_rb_dither_type {
44 DITHER_PIXEL = 0,
45 DITHER_SUBPIXEL = 1,
46};
47
48enum a2xx_colorformatx {
49 COLORX_4_4_4_4 = 0,
50 COLORX_1_5_5_5 = 1,
51 COLORX_5_6_5 = 2,
52 COLORX_8 = 3,
53 COLORX_8_8 = 4,
54 COLORX_8_8_8_8 = 5,
55 COLORX_S8_8_8_8 = 6,
56 COLORX_16_FLOAT = 7,
57 COLORX_16_16_FLOAT = 8,
58 COLORX_16_16_16_16_FLOAT = 9,
59 COLORX_32_FLOAT = 10,
60 COLORX_32_32_FLOAT = 11,
61 COLORX_32_32_32_32_FLOAT = 12,
62 COLORX_2_3_3 = 13,
63 COLORX_8_8_8 = 14,
64};
65
66enum a2xx_sq_surfaceformat {
67 FMT_1_REVERSE = 0,
68 FMT_1 = 1,
69 FMT_8 = 2,
70 FMT_1_5_5_5 = 3,
71 FMT_5_6_5 = 4,
72 FMT_6_5_5 = 5,
73 FMT_8_8_8_8 = 6,
74 FMT_2_10_10_10 = 7,
75 FMT_8_A = 8,
76 FMT_8_B = 9,
77 FMT_8_8 = 10,
78 FMT_Cr_Y1_Cb_Y0 = 11,
79 FMT_Y1_Cr_Y0_Cb = 12,
80 FMT_5_5_5_1 = 13,
81 FMT_8_8_8_8_A = 14,
82 FMT_4_4_4_4 = 15,
83 FMT_10_11_11 = 16,
84 FMT_11_11_10 = 17,
85 FMT_DXT1 = 18,
86 FMT_DXT2_3 = 19,
87 FMT_DXT4_5 = 20,
88 FMT_24_8 = 22,
89 FMT_24_8_FLOAT = 23,
90 FMT_16 = 24,
91 FMT_16_16 = 25,
92 FMT_16_16_16_16 = 26,
93 FMT_16_EXPAND = 27,
94 FMT_16_16_EXPAND = 28,
95 FMT_16_16_16_16_EXPAND = 29,
96 FMT_16_FLOAT = 30,
97 FMT_16_16_FLOAT = 31,
98 FMT_16_16_16_16_FLOAT = 32,
99 FMT_32 = 33,
100 FMT_32_32 = 34,
101 FMT_32_32_32_32 = 35,
102 FMT_32_FLOAT = 36,
103 FMT_32_32_FLOAT = 37,
104 FMT_32_32_32_32_FLOAT = 38,
105 FMT_32_AS_8 = 39,
106 FMT_32_AS_8_8 = 40,
107 FMT_16_MPEG = 41,
108 FMT_16_16_MPEG = 42,
109 FMT_8_INTERLACED = 43,
110 FMT_32_AS_8_INTERLACED = 44,
111 FMT_32_AS_8_8_INTERLACED = 45,
112 FMT_16_INTERLACED = 46,
113 FMT_16_MPEG_INTERLACED = 47,
114 FMT_16_16_MPEG_INTERLACED = 48,
115 FMT_DXN = 49,
116 FMT_8_8_8_8_AS_16_16_16_16 = 50,
117 FMT_DXT1_AS_16_16_16_16 = 51,
118 FMT_DXT2_3_AS_16_16_16_16 = 52,
119 FMT_DXT4_5_AS_16_16_16_16 = 53,
120 FMT_2_10_10_10_AS_16_16_16_16 = 54,
121 FMT_10_11_11_AS_16_16_16_16 = 55,
122 FMT_11_11_10_AS_16_16_16_16 = 56,
123 FMT_32_32_32_FLOAT = 57,
124 FMT_DXT3A = 58,
125 FMT_DXT5A = 59,
126 FMT_CTX1 = 60,
127 FMT_DXT3A_AS_1_1_1_1 = 61,
128};
129
130enum a2xx_sq_ps_vtx_mode {
131 POSITION_1_VECTOR = 0,
132 POSITION_2_VECTORS_UNUSED = 1,
133 POSITION_2_VECTORS_SPRITE = 2,
134 POSITION_2_VECTORS_EDGE = 3,
135 POSITION_2_VECTORS_KILL = 4,
136 POSITION_2_VECTORS_SPRITE_KILL = 5,
137 POSITION_2_VECTORS_EDGE_KILL = 6,
138 MULTIPASS = 7,
139};
140
141enum a2xx_sq_sample_cntl {
142 CENTROIDS_ONLY = 0,
143 CENTERS_ONLY = 1,
144 CENTROIDS_AND_CENTERS = 2,
145};
146
147enum a2xx_dx_clip_space {
148 DXCLIP_OPENGL = 0,
149 DXCLIP_DIRECTX = 1,
150};
151
152enum a2xx_pa_su_sc_polymode {
153 POLY_DISABLED = 0,
154 POLY_DUALMODE = 1,
155};
156
157enum a2xx_rb_edram_mode {
158 EDRAM_NOP = 0,
159 COLOR_DEPTH = 4,
160 DEPTH_ONLY = 5,
161 EDRAM_COPY = 6,
162};
163
164enum a2xx_pa_sc_pattern_bit_order {
165 LITTLE = 0,
166 BIG = 1,
167};
168
169enum a2xx_pa_sc_auto_reset_cntl {
170 NEVER = 0,
171 EACH_PRIMITIVE = 1,
172 EACH_PACKET = 2,
173};
174
175enum a2xx_pa_pixcenter {
176 PIXCENTER_D3D = 0,
177 PIXCENTER_OGL = 1,
178};
179
180enum a2xx_pa_roundmode {
181 TRUNCATE = 0,
182 ROUND = 1,
183 ROUNDTOEVEN = 2,
184 ROUNDTOODD = 3,
185};
186
187enum a2xx_pa_quantmode {
188 ONE_SIXTEENTH = 0,
189 ONE_EIGTH = 1,
190 ONE_QUARTER = 2,
191 ONE_HALF = 3,
192 ONE = 4,
193};
194
195enum a2xx_rb_copy_sample_select {
196 SAMPLE_0 = 0,
197 SAMPLE_1 = 1,
198 SAMPLE_2 = 2,
199 SAMPLE_3 = 3,
200 SAMPLE_01 = 4,
201 SAMPLE_23 = 5,
202 SAMPLE_0123 = 6,
203};
204
205enum sq_tex_clamp {
206 SQ_TEX_WRAP = 0,
207 SQ_TEX_MIRROR = 1,
208 SQ_TEX_CLAMP_LAST_TEXEL = 2,
209 SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
210 SQ_TEX_CLAMP_HALF_BORDER = 4,
211 SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
212 SQ_TEX_CLAMP_BORDER = 6,
213 SQ_TEX_MIRROR_ONCE_BORDER = 7,
214};
215
216enum sq_tex_swiz {
217 SQ_TEX_X = 0,
218 SQ_TEX_Y = 1,
219 SQ_TEX_Z = 2,
220 SQ_TEX_W = 3,
221 SQ_TEX_ZERO = 4,
222 SQ_TEX_ONE = 5,
223};
224
225enum sq_tex_filter {
226 SQ_TEX_FILTER_POINT = 0,
227 SQ_TEX_FILTER_BILINEAR = 1,
228 SQ_TEX_FILTER_BICUBIC = 2,
229};
230
231#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
232
233#define REG_A2XX_RBBM_CNTL 0x0000003b
234
235#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
236
237#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
238
239#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
240
241#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
242
243#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
244
245#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
246
247#define REG_A2XX_RBBM_DEBUG 0x0000039b
248
249#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
250
251#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
252
253#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
254
255#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
256
257#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
258
259#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
260
261#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
262
263#define REG_A2XX_RBBM_INT_ACK 0x000003b6
264
265#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
266
267#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
268
269#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
270
271#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
272
273#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
274
275#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
276
277#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
278
279#define REG_A2XX_CP_ST_BASE 0x0000044d
280
281#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
282
283#define REG_A2XX_CP_IB1_BASE 0x00000458
284
285#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
286
287#define REG_A2XX_CP_IB2_BASE 0x0000045a
288
289#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
290
291#define REG_A2XX_CP_STAT 0x0000047f
292
293#define REG_A2XX_RBBM_STATUS 0x000005d0
294#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
295#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
296static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
297{
298 return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
299}
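/*
 * Note: every multi-bit field in this generated header follows the
 * MASK/SHIFT/inline-helper pattern shown above.  A register value is
 * composed by OR-ing the helpers together, for example (an illustrative
 * combination only; gmem_base is a hypothetical variable):
 *
 *	uint32_t rb_color_info =
 *		A2XX_RB_COLOR_INFO_FORMAT(COLORX_8_8_8_8) |
 *		A2XX_RB_COLOR_INFO_SWAP(1) |
 *		A2XX_RB_COLOR_INFO_BASE(gmem_base);
 *
 * Single-bit fields, such as A2XX_RBBM_STATUS_TC_BUSY just below, are
 * plain masks that can be tested or OR-ed in directly.
 */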
300#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
301#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
302#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
303#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
304#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
305#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
306#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
307#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
308#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
309#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
310#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
311#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
312#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
313#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
314#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
315#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
316#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
317#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
318#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
319
320#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
321#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
322#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
323static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
324{
325 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
326}
327#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
328#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
329static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
330{
331 return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
332}
333
334static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
335
336static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
337
338static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
339
340static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
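
/*
 * Note: per-instance register blocks are generated as inline helpers
 * taking the instance index, so instance i0 of this VSC pipe block
 * starts at 0x0c06 + 3*i0.  Programming pipe 2's config, for example,
 * would look like (gpu_write() stands in for whatever MMIO accessor
 * the caller uses):
 *
 *	gpu_write(gpu, REG_A2XX_VSC_PIPE_CONFIG(2), config);
 */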
341
342#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
343
344#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
345
346#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
347
348#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
349
350#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
351
352#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
353
354#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
355
356#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
357
358#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
359
360#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
361
362#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
363
364#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
365
366#define REG_A2XX_SQ_INT_CNTL 0x00000d34
367
368#define REG_A2XX_SQ_INT_STATUS 0x00000d35
369
370#define REG_A2XX_SQ_INT_ACK 0x00000d36
371
372#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
373
374#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
375
376#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
377
378#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
379
380#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
381
382#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
383
384#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
385
386#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
387
388#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
389
390#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
391
392#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
393
394#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
395
396#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
397
398#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
399
400#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
401
402#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
403
404#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
405
406#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
407
408#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
409
410#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
411
412#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
413#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
414
415#define REG_A2XX_TP0_CHICKEN 0x00000e1e
416
417#define REG_A2XX_RB_BC_CONTROL 0x00000f01
418#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
419#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
420#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
421static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
422{
423 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
424}
425#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
426#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
427#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
428#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
429#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
430#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
431#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
432static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
433{
434 return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
435}
436#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
437#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
438#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
439#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
440#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
441#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
442static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
443{
444 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
445}
446#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
447#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
448#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
449static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
450{
451 return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
452}
453#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
454#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
455static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
456{
457 return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
458}
459#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
460#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
461#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
462
463#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
464
465#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
466
467#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
468
469#define REG_A2XX_RB_SURFACE_INFO 0x00002000
470
471#define REG_A2XX_RB_COLOR_INFO 0x00002001
472#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
473#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
474static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
475{
476 return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
477}
478#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
479#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
480static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
481{
482 return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
483}
484#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
485#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
486#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
487static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
488{
489 return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
490}
491#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
492#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
493static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
494{
495 return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
496}
497#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
498#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
499static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
500{
501 return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
502}
503
504#define REG_A2XX_RB_DEPTH_INFO 0x00002002
505#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
506#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
507static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
508{
509 return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
510}
511#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
512#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
513static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
514{
515 return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
516}
517
518#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
519
520#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
521
522#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
523#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
524#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
525#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
526static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
527{
528 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
529}
530#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
531#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
532static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
533{
534 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
535}
536
537#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
538#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
539#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
540#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
541static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
542{
543 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
544}
545#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
546#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
547static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
548{
549 return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
550}
551
552#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
553#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
554#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
555static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
556{
557 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
558}
559#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
560#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
561static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
562{
563 return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
564}
565#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
566
567#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
568#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
569#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
570#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
571static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
572{
573 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
574}
575#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
576#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
577static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
578{
579 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
580}
581
582#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
583#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
584#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
585#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
586static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
587{
588 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
589}
590#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
591#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
592static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
593{
594 return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
595}
596
597#define REG_A2XX_UNKNOWN_2010 0x00002010
598
599#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
600
601#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
602
603#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
604
605#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
606
607#define REG_A2XX_RB_COLOR_MASK 0x00002104
608#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
609#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
610#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
611#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
612
613#define REG_A2XX_RB_BLEND_RED 0x00002105
614
615#define REG_A2XX_RB_BLEND_GREEN 0x00002106
616
617#define REG_A2XX_RB_BLEND_BLUE 0x00002107
618
619#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
620
621#define REG_A2XX_RB_FOG_COLOR 0x00002109
622
623#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
624#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
625#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
626static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
627{
628 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
629}
630#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
631#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
632static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
633{
634 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
635}
636#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
637#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
638static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
639{
640 return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
641}
642
643#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
644#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
645#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
646static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
647{
648 return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
649}
650#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
651#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
652static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
653{
654 return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
655}
656#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
657#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
658static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
659{
660 return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
661}
662
663#define REG_A2XX_RB_ALPHA_REF 0x0000210e
664
665#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
666#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
667#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
668static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
669{
670 return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
671}
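
/*
 * Note: float-valued fields (viewport scales/offsets, guard-band values,
 * etc.) are packed with fui(), which the including code is expected to
 * provide and which just reinterprets the float's IEEE-754 bit pattern
 * as a uint32_t, roughly:
 *
 *	static inline uint32_t fui(float f)
 *	{
 *		union { float f; uint32_t i; } u = { .f = f };
 *		return u.i;
 *	}
 */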
672
673#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
674#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
675#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
676static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
677{
678 return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
679}
680
681#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
682#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
683#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
684static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
685{
686 return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
687}
688
689#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
690#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
691#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
692static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
693{
694 return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
695}
696
697#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
698#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
699#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
700static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
701{
702 return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
703}
704
705#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
706#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
707#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
708static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
709{
710 return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
711}
712
713#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
714#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
715#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
716static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
717{
718 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
719}
720#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
721#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
722static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
723{
724 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
725}
726#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
727#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
728#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
729#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
730#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
731#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
732static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
733{
734 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
735}
736#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
737#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
738static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
739{
740 return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
741}
742#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
743#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
744static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
745{
746 return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
747}
748#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
749
750#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
751#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
752#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
753#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
754#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
755static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
756{
757 return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
758}
759#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
760#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
761static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
762{
763 return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
764}
765#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
766#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
767#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
768
769#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
770
771#define REG_A2XX_SQ_WRAPPING_0 0x00002183
772
773#define REG_A2XX_SQ_WRAPPING_1 0x00002184
774
775#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
776
777#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
778
779#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
780#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
781#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
782#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
783#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
784#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
785#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
786static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
787{
788 return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
789}
790#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
791#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
792#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
793static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
794{
795 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
796}
797#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
798#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
799static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
800{
801 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
802}
803#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
804#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
805static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
806{
807 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
808}
809#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
810#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
811static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
812{
813 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
814}
815#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
816#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
817static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
818{
819 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
820}
821#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
822#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
823static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
824{
825 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
826}
827#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
828#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
829static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
830{
831 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
832}
833#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
834#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
835static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
836{
837 return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
838}
839
840#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
841#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
842#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
843static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
844{
845 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
846}
847#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
848#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
849static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
850{
851 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
852}
853#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
854#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
855static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
856{
857 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
858}
859#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
860#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
861static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
862{
863 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
864}
865#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
866#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
867static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
868{
869 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
870}
871#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
872#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
873static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
874{
875 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
876}
877#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
878#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
879
880#define REG_A2XX_RB_COLORCONTROL 0x00002202
881#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
882#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
883static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
884{
885 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
886}
887#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
888#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
889#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
890#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
891#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
892#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
893#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
894static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
895{
896 return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
897}
898#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
899#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
900static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
901{
902 return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
903}
904#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
905#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
906static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
907{
908 return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
909}
910#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
911#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
912#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
913static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
914{
915 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
916}
917#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
918#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
919static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
920{
921 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
922}
923#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
924#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
925static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
926{
927 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
928}
929#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
930#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
931static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
932{
933 return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
934}
935
936#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
937#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
938#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
939static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
940{
941 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
942}
943#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
944#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
945static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
946{
947 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
948}
949#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
950#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
951static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
952{
953 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
954}
955
956#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
957#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
958#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
959#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
960#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
961static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
962{
963 return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
964}
965#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
966#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
967#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
968#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
969#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
970
971#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
972#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
973#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
974#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
975#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
976#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
977static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
978{
979 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
980}
981#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
982#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
983static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
984{
985 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
986}
987#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
988#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
989static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
990{
991 return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
992}
993#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
994#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
995#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
996#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
997#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
998#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
999#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
1000#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
1001#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
1002#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
1003#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
1004#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
1005#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
1006#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
1007#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
1008#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
1009
1010#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
1011#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
1012#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
1013#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
1014#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
1015#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
1016#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
1017#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
1018#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
1019#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
1020#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
1021
1022#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
1023#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
1024#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
1025static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
1026{
1027 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
1028}
1029#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
1030#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
1031static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
1032{
1033 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
1034}
1035#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
1036#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
1037static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
1038{
1039 return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
1040}
1041
1042#define REG_A2XX_RB_MODECONTROL 0x00002208
1043#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
1044#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
1045static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
1046{
1047 return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
1048}
1049
1050#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
1051
1052#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
1053
1054#define REG_A2XX_CLEAR_COLOR 0x0000220b
1055#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
1056#define A2XX_CLEAR_COLOR_RED__SHIFT 0
1057static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
1058{
1059 return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
1060}
1061#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
1062#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
1063static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
1064{
1065 return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
1066}
1067#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
1068#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
1069static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
1070{
1071 return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
1072}
1073#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
1074#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
1075static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
1076{
1077 return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
1078}
1079
1080#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
1081
1082#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
1083#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
1084#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
1085static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
1086{
1087 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
1088}
1089#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
1090#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
1091static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
1092{
1093 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
1094}
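
/*
 * Note: point and line widths here are unsigned fixed-point values with
 * 3 fractional bits, hence the multiply by 8.0 in these helpers;
 * A2XX_PA_SU_POINT_SIZE_WIDTH(1.5f), for example, encodes the raw field
 * value 12 before shifting it into place.
 */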
1095
1096#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
1097#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
1098#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
1099static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
1100{
1101 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
1102}
1103#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
1104#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
1105static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
1106{
1107 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
1108}
1109
1110#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
1111#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
1112#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
1113static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
1114{
1115 return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
1116}
1117
1118#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
1119#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
1120#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
1121static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
1122{
1123 return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
1124}
1125#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
1126#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
1127static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
1128{
1129 return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
1130}
1131#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
1132#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
1133static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
1134{
1135 return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
1136}
1137#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
1138#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
1139static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
1140{
1141 return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
1142}
1143
1144#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
1145
1146#define REG_A2XX_VGT_ENHANCE 0x00002294
1147
1148#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
1149#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
1150#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
1151static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
1152{
1153 return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
1154}
1155#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
1156#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
1157#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
1158
1159#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
1160
1161#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
1162#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
1163#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
1164static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
1165{
1166 return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
1167}
1168#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
1169#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
1170static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
1171{
1172 return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
1173}
1174#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
1175#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
1176static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
1177{
1178 return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
1179}
1180
1181#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
1182#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
1183#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
1184static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
1185{
1186 return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
1187}
1188
1189#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
1190#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
1191#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
1192static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
1193{
1194 return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
1195}
1196
1197#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
1198#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
1199#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
1200static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
1201{
1202 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
1203}
1204
1205#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
1206#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
1207#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
1208static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
1209{
1210 return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
1211}
1212
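/* Editor's note: illustrative sketch only, not part of the generated header.
 * The guard-band helpers above use fui() to reinterpret the IEEE-754 bit
 * pattern of the float argument; the register simply holds the raw float
 * bits, so e.g. a clip adjust of 1.0 could be written as:
 *
 *   gpu_write(gpu, REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ,
 *             A2XX_PA_CL_GB_VERT_CLIP_ADJ(1.0));
 *
 * gpu_write() is a hypothetical MMIO helper used only for this example.
 */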
1213#define REG_A2XX_SQ_VS_CONST 0x00002307
1214#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
1215#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
1216static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
1217{
1218 return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
1219}
1220#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
1221#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
1222static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
1223{
1224 return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
1225}
1226
1227#define REG_A2XX_SQ_PS_CONST 0x00002308
1228#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
1229#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
1230static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
1231{
1232 return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
1233}
1234#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
1235#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
1236static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
1237{
1238 return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
1239}
1240
1241#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
1242
1243#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
1244
1245#define REG_A2XX_PA_SC_AA_MASK 0x00002312
1246
1247#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
1248
1249#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
1250
1251#define REG_A2XX_RB_COPY_CONTROL 0x00002318
1252#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
1253#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
1254static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
1255{
1256 return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
1257}
1258#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
1259#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
1260#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
1261static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
1262{
1263 return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
1264}
1265
1266#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
1267
1268#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
1269#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
1270#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
1271static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
1272{
1273 return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
1274}
1275
1276#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
1277#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
1278#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
1279static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
1280{
1281 return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
1282}
1283#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
1284#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
1285#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
1286static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
1287{
1288 return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
1289}
1290#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
1291#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
1292static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
1293{
1294 return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
1295}
1296#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
1297#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
1298static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
1299{
1300 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
1301}
1302#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
1303#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
1304static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
1305{
1306 return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
1307}
1308#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
1309#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
1310#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
1311#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
1312
1313#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
1314#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
1315#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
1316static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
1317{
1318 return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
1319}
1320#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
1321#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
1322static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
1323{
1324 return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
1325}
1326
1327#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
1328
1329#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
1330
1331#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
1332
1333#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
1334
1335#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
1336
1337#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
1338
1339#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
1340
1341#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
1342
1343#define REG_A2XX_SQ_CONSTANT_0 0x00004000
1344
1345#define REG_A2XX_SQ_FETCH_0 0x00004800
1346
1347#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
1348
1349#define REG_A2XX_SQ_CF_LOOP 0x00004908
1350
1351#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
1352
1353#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
1354
1355#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
1356
1357#define REG_A2XX_SQ_TEX_0 0x00000000
1358#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
1359#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
1360static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
1361{
1362 return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
1363}
1364#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
1365#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
1366static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
1367{
1368 return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
1369}
1370#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
1371#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
1372static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
1373{
1374 return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
1375}
1376#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
1377#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
1378static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
1379{
1380 return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
1381}
1382
1383#define REG_A2XX_SQ_TEX_1 0x00000001
1384
1385#define REG_A2XX_SQ_TEX_2 0x00000002
1386#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
1387#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
1388static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
1389{
1390 return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
1391}
1392#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
1393#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
1394static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
1395{
1396 return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
1397}
1398
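/* Editor's note: illustrative sketch only, not part of the generated header.
 * REG_A2XX_SQ_TEX_0..3 have offsets 0x0..0x3, which suggests they are
 * relative dword positions within a texture-state record emitted through the
 * command stream rather than standalone MMIO registers. Dword 2, for
 * instance, packs the texture dimensions:
 *
 *   uint32_t tex2 = A2XX_SQ_TEX_2_WIDTH(width) |
 *                   A2XX_SQ_TEX_2_HEIGHT(height);
 *
 * width and height are hypothetical variables used only for this example.
 */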
1399#define REG_A2XX_SQ_TEX_3 0x00000003
1400#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
1401#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
1402static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
1403{
1404 return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
1405}
1406#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
1407#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
1408static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
1409{
1410 return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
1411}
1412#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
1413#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
1414static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
1415{
1416 return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
1417}
1418#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
1419#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
1420static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
1421{
1422 return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
1423}
1424#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
1425#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
1426static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
1427{
1428 return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
1429}
1430#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
1431#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
1432static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
1433{
1434 return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
1435}
1436
1437
1438#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 000000000000..d183516067b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
1#ifndef A3XX_XML
2#define A3XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum a3xx_render_mode {
44 RB_RENDERING_PASS = 0,
45 RB_TILING_PASS = 1,
46 RB_RESOLVE_PASS = 2,
47};
48
49enum a3xx_tile_mode {
50 LINEAR = 0,
51 TILE_32X32 = 2,
52};
53
54enum a3xx_threadmode {
55 MULTI = 0,
56 SINGLE = 1,
57};
58
59enum a3xx_instrbuffermode {
60 BUFFER = 1,
61};
62
63enum a3xx_threadsize {
64 TWO_QUADS = 0,
65 FOUR_QUADS = 1,
66};
67
68enum a3xx_state_block_id {
69 HLSQ_BLOCK_ID_TP_TEX = 2,
70 HLSQ_BLOCK_ID_TP_MIPMAP = 3,
71 HLSQ_BLOCK_ID_SP_VS = 4,
72 HLSQ_BLOCK_ID_SP_FS = 6,
73};
74
75enum a3xx_cache_opcode {
76 INVALIDATE = 1,
77};
78
79enum a3xx_vtx_fmt {
80 VFMT_FLOAT_32 = 0,
81 VFMT_FLOAT_32_32 = 1,
82 VFMT_FLOAT_32_32_32 = 2,
83 VFMT_FLOAT_32_32_32_32 = 3,
84 VFMT_FLOAT_16 = 4,
85 VFMT_FLOAT_16_16 = 5,
86 VFMT_FLOAT_16_16_16 = 6,
87 VFMT_FLOAT_16_16_16_16 = 7,
88 VFMT_FIXED_32 = 8,
89 VFMT_FIXED_32_32 = 9,
90 VFMT_FIXED_32_32_32 = 10,
91 VFMT_FIXED_32_32_32_32 = 11,
92 VFMT_SHORT_16 = 16,
93 VFMT_SHORT_16_16 = 17,
94 VFMT_SHORT_16_16_16 = 18,
95 VFMT_SHORT_16_16_16_16 = 19,
96 VFMT_USHORT_16 = 20,
97 VFMT_USHORT_16_16 = 21,
98 VFMT_USHORT_16_16_16 = 22,
99 VFMT_USHORT_16_16_16_16 = 23,
100 VFMT_NORM_SHORT_16 = 24,
101 VFMT_NORM_SHORT_16_16 = 25,
102 VFMT_NORM_SHORT_16_16_16 = 26,
103 VFMT_NORM_SHORT_16_16_16_16 = 27,
104 VFMT_NORM_USHORT_16 = 28,
105 VFMT_NORM_USHORT_16_16 = 29,
106 VFMT_NORM_USHORT_16_16_16 = 30,
107 VFMT_NORM_USHORT_16_16_16_16 = 31,
108 VFMT_UBYTE_8 = 40,
109 VFMT_UBYTE_8_8 = 41,
110 VFMT_UBYTE_8_8_8 = 42,
111 VFMT_UBYTE_8_8_8_8 = 43,
112 VFMT_NORM_UBYTE_8 = 44,
113 VFMT_NORM_UBYTE_8_8 = 45,
114 VFMT_NORM_UBYTE_8_8_8 = 46,
115 VFMT_NORM_UBYTE_8_8_8_8 = 47,
116 VFMT_BYTE_8 = 48,
117 VFMT_BYTE_8_8 = 49,
118 VFMT_BYTE_8_8_8 = 50,
119 VFMT_BYTE_8_8_8_8 = 51,
120 VFMT_NORM_BYTE_8 = 52,
121 VFMT_NORM_BYTE_8_8 = 53,
122 VFMT_NORM_BYTE_8_8_8 = 54,
123 VFMT_NORM_BYTE_8_8_8_8 = 55,
124 VFMT_UINT_10_10_10_2 = 60,
125 VFMT_NORM_UINT_10_10_10_2 = 61,
126 VFMT_INT_10_10_10_2 = 62,
127 VFMT_NORM_INT_10_10_10_2 = 63,
128};
129
130enum a3xx_tex_fmt {
131 TFMT_NORM_USHORT_565 = 4,
132 TFMT_NORM_USHORT_5551 = 6,
133 TFMT_NORM_USHORT_4444 = 7,
134 TFMT_NORM_UINT_X8Z24 = 10,
135 TFMT_NORM_UINT_NV12_UV_TILED = 17,
136 TFMT_NORM_UINT_NV12_Y_TILED = 19,
137 TFMT_NORM_UINT_NV12_UV = 21,
138 TFMT_NORM_UINT_NV12_Y = 23,
139 TFMT_NORM_UINT_I420_Y = 24,
140 TFMT_NORM_UINT_I420_U = 26,
141 TFMT_NORM_UINT_I420_V = 27,
142 TFMT_NORM_UINT_2_10_10_10 = 41,
143 TFMT_NORM_UINT_A8 = 44,
144 TFMT_NORM_UINT_L8_A8 = 47,
145 TFMT_NORM_UINT_8 = 48,
146 TFMT_NORM_UINT_8_8 = 49,
147 TFMT_NORM_UINT_8_8_8 = 50,
148 TFMT_NORM_UINT_8_8_8_8 = 51,
149 TFMT_FLOAT_16 = 64,
150 TFMT_FLOAT_16_16 = 65,
151 TFMT_FLOAT_16_16_16_16 = 67,
152 TFMT_FLOAT_32 = 84,
153 TFMT_FLOAT_32_32 = 85,
154 TFMT_FLOAT_32_32_32_32 = 87,
155};
156
157enum a3xx_tex_fetchsize {
158 TFETCH_DISABLE = 0,
159 TFETCH_1_BYTE = 1,
160 TFETCH_2_BYTE = 2,
161 TFETCH_4_BYTE = 3,
162 TFETCH_8_BYTE = 4,
163 TFETCH_16_BYTE = 5,
164};
165
166enum a3xx_color_fmt {
167 RB_R8G8B8_UNORM = 4,
168 RB_R8G8B8A8_UNORM = 8,
169 RB_Z16_UNORM = 12,
170 RB_A8_UNORM = 20,
171};
172
173enum a3xx_color_swap {
174 WZYX = 0,
175 WXYZ = 1,
176 ZYXW = 2,
177 XYZW = 3,
178};
179
180enum a3xx_msaa_samples {
181 MSAA_ONE = 0,
182 MSAA_TWO = 1,
183 MSAA_FOUR = 2,
184};
185
186enum a3xx_sp_perfcounter_select {
187 SP_FS_CFLOW_INSTRUCTIONS = 12,
188 SP_FS_FULL_ALU_INSTRUCTIONS = 14,
189 SP0_ICL1_MISSES = 26,
190 SP_ALU_ACTIVE_CYCLES = 29,
191};
192
193enum adreno_rb_copy_control_mode {
194 RB_COPY_RESOLVE = 1,
195 RB_COPY_DEPTH_STENCIL = 5,
196};
197
198enum a3xx_tex_filter {
199 A3XX_TEX_NEAREST = 0,
200 A3XX_TEX_LINEAR = 1,
201};
202
203enum a3xx_tex_clamp {
204 A3XX_TEX_REPEAT = 0,
205 A3XX_TEX_CLAMP_TO_EDGE = 1,
206 A3XX_TEX_MIRROR_REPEAT = 2,
207 A3XX_TEX_CLAMP_NONE = 3,
208};
209
210enum a3xx_tex_swiz {
211 A3XX_TEX_X = 0,
212 A3XX_TEX_Y = 1,
213 A3XX_TEX_Z = 2,
214 A3XX_TEX_W = 3,
215 A3XX_TEX_ZERO = 4,
216 A3XX_TEX_ONE = 5,
217};
218
219enum a3xx_tex_type {
220 A3XX_TEX_1D = 0,
221 A3XX_TEX_2D = 1,
222 A3XX_TEX_CUBE = 2,
223 A3XX_TEX_3D = 3,
224};
225
226#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
227#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
228#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
229#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
230#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
231#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
232#define A3XX_INT0_VFD_ERROR 0x00000040
233#define A3XX_INT0_CP_SW_INT 0x00000080
234#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
235#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
236#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
237#define A3XX_INT0_CP_HW_FAULT 0x00000800
238#define A3XX_INT0_CP_DMA 0x00001000
239#define A3XX_INT0_CP_IB2_INT 0x00002000
240#define A3XX_INT0_CP_IB1_INT 0x00004000
241#define A3XX_INT0_CP_RB_INT 0x00008000
242#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
243#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
244#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
245#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
246#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
247#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
248#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
249#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
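/* Editor's note: illustrative sketch only, not part of the generated header.
 * The A3XX_INT0_* values above are single-bit flags shared by the
 * RBBM_INT_0_MASK and RBBM_INT_0_STATUS registers; a driver would typically
 * OR together the interrupts it cares about and program the mask, e.g.
 * (the selection here is arbitrary, purely for illustration):
 *
 *   uint32_t irq_mask = A3XX_INT0_RBBM_AHB_ERROR |
 *                       A3XX_INT0_CP_HW_FAULT |
 *                       A3XX_INT0_CP_RB_INT |
 *                       A3XX_INT0_MISC_HANG_DETECT;
 *   gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, irq_mask);
 *
 * gpu_write() is a hypothetical MMIO helper used only for this example.
 */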
250#define REG_A3XX_RBBM_HW_VERSION 0x00000000
251
252#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
253
254#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
255
256#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
257
258#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
259
260#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
261
262#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
263
264#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
265
266#define REG_A3XX_RBBM_AHB_CMD 0x00000022
267
268#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
269
270#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
271
272#define REG_A3XX_RBBM_STATUS 0x00000030
273#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
274#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
275#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
276#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
277#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
278#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
279#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
280#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
281#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
282#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
283#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
284#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
285#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
286#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
287#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
288#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
289#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
290#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
291#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
292#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
293#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
294
295#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
296
297#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
298
299#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
300
301#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
302
303#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
304
305#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
306
307#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
308
309#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
310
311#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
312
313#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
314
315#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
316
317#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
318
319#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
320
321#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
322
323#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
324
325#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
326
327#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
328
329#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
330
331#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
332
333#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
334
335#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
336
337#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
338
339#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
340
341#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
342
343#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
344
345#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
346
347#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
348
349#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
350
351#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
352
353#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
354
355#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
356
357#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
358
359#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
360
361#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
362
363#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
364
365#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
366
367#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
368
369#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
370
371#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
372
373#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
374
375#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
376
377#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
378
379#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
380
381#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
382
383#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
384
385#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
386
387#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
388
389#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
390
391#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
392
393#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
394
395#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
396
397#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
398
399#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
400
401#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
402
403#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
404
405#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
406
407#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
408
409#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
410
411#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
412
413#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
414
415#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
416
417#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
418
419#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
420
421#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
422
423#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
424
425#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
426
427#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
428
429#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
430
431#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
432
433#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
434
435#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
436
437#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
438
439#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
440
441#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
442
443#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
444
445#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
446
447#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
448
449#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
450
451#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
452
453#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
454
455#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
456
457#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
458
459#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
460
461#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
462
463#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
464
465#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
466
467#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
468
469#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
470
471#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
472
473#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
474
475#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
476
477#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
478
479#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
480
481#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
482
483#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
484
485#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
486
487#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
488
489#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
490
491#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
492
493#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
494
495#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
496
497#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
498
499#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
500
501#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
502
503#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
504
505#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
506
507#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
508
509#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
510
511#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
512
513#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
514
515#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
516
517#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
518
519#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
520
521#define REG_A3XX_CP_ROQ_DATA 0x000001cd
522
523#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
524
525#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
526
527#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
528
529#define REG_A3XX_CP_MEQ_ADDR 0x000001da
530
531#define REG_A3XX_CP_MEQ_DATA 0x000001db
532
533#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
534
535#define REG_A3XX_CP_HW_FAULT 0x0000045c
536
537#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
538
539#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
540
541static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
542
543static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
544
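/* Editor's note: illustrative sketch only, not part of the generated header.
 * Register arrays are exposed as small inline helpers that compute the
 * offset of the i'th instance, so the protect registers could be programmed
 * in a loop such as:
 *
 *   for (i = 0; i < num_protect_regs; i++)
 *           gpu_write(gpu, REG_A3XX_CP_PROTECT_REG(i), protect_vals[i]);
 *
 * num_protect_regs, protect_vals[] and gpu_write() are hypothetical names
 * used only for this example.
 */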
545#define REG_A3XX_CP_AHB_FAULT 0x0000054d
546
547#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
548#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
549#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
550#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
551#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
552#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
553#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
554
555#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
556#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
557#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
558static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
559{
560 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
561}
562#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
563#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
564static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
565{
566 return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
567}
568
569#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
570#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
571#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
572static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
573{
574 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
575}
576
577#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
578#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
579#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
580static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
581{
582 return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
583}
584
585#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
586#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
587#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
588static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
589{
590 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
591}
592
593#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
594#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
595#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
596static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
597{
598 return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
599}
600
601#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
602#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
603#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
604static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
605{
606 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
607}
608
609#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
610#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
611#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
612static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
613{
614 return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
615}
616
617#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
618
619#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
620
621#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
622#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
623#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
624static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
625{
626 return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
627}
628
629#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
630#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
631#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
632static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
633{
634 return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
635}
636
637#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
638#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
639#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
640#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
641#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
642static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
643{
644 return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
645}
646#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
647
648#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
649#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
650#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
651static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
652{
653 return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
654}
655#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
656#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
657static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
658{
659 return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
660}
661#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
662#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
663static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
664{
665 return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
666}
667
668#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
669#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
670#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
671#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
672static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
673{
674 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
675}
676#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
677#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
678static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
679{
680 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
681}
682
683#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
684#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
685#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
686#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
687static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
688{
689 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
690}
691#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
692#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
693static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
694{
695 return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
696}
697
698#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
699#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
700#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
701#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
702static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
703{
704 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
705}
706#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
707#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
708static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
709{
710 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
711}
712
713#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
714#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
715#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
716#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
717static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
718{
719 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
720}
721#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
722#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
723static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
724{
725 return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
726}
727
728#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
729#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
730#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
731#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
732static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
733{
734 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
735}
736#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
737#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
738
739#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
740#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
741#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
742static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
743{
744 return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
745}
746#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
747#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
748#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
749#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
750static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
751{
752 return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
753}
754
755#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
756#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
757#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
758#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
759static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
760{
761 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
762}
763#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
764#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
765static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
766{
767 return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
768}
769
770#define REG_A3XX_UNKNOWN_20C3 0x000020c3
771
772static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
773
774static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
775#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
776#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
777#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
778#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
779#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
780static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
781{
782 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
783}
784#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
785#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
786static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
787{
788 return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
789}
790#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
791#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
792static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
793{
794 return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
795}
796
797static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
798#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
799#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
800static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
801{
802 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
803}
804#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
805#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
806static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
807{
808 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
809}
810#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
811#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
812static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
813{
814 return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
815}
816#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
817#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
818static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
819{
820 return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
821}
822
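/* Editor's note: illustrative sketch only, not part of the generated header.
 * A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH() pre-shifts its argument right by 5,
 * i.e. the field is programmed in 32-byte units while the caller passes the
 * byte pitch directly. The buf-info word for MRT slot n might be built
 * roughly like:
 *
 *   uint32_t info = A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(RB_R8G8B8A8_UNORM) |
 *                   A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(LINEAR) |
 *                   A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(WZYX) |
 *                   A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(stride_bytes);
 *   gpu_write(gpu, REG_A3XX_RB_MRT_BUF_INFO(n), info);
 *
 * stride_bytes, n and gpu_write() are hypothetical; the format/tile/swap
 * values are example entries from the enums earlier in this header.
 */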
823static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
824#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
825#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
826static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
827{
828 return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
829}
830
831static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
832#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
833#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
834static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
835{
836 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
837}
838#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
839#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
840static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
841{
842 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
843}
844#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
845#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
846static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
847{
848 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
849}
850#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
851#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
852static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
853{
854 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
855}
856#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
857#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
858static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
859{
860 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
861}
862#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
863#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
864static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
865{
866 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
867}
868#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
869
870#define REG_A3XX_RB_BLEND_RED 0x000020e4
871#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
872#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
873static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
874{
875 return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
876}
877#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
878#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
879static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
880{
881 return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
882}
883
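/* Editor's note: illustrative sketch only, not part of the generated header.
 * Each RB_BLEND_* register carries the same channel twice: an 8-bit unsigned
 * value in the low byte and a half-float (packed via util_float_to_half())
 * in the top 16 bits, so both forms would normally be written together, e.g.
 * for a normalized red channel:
 *
 *   uint32_t val = A3XX_RB_BLEND_RED_UINT((uint32_t)(red * 255.0)) |
 *                  A3XX_RB_BLEND_RED_FLOAT(red);
 *   gpu_write(gpu, REG_A3XX_RB_BLEND_RED, val);
 *
 * red and gpu_write() are hypothetical names used only for this example.
 */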
884#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
885#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
886#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
887static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
888{
889 return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
890}
891#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
892#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
893static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
894{
895 return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
896}
897
898#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
899#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
900#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
901static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
902{
903 return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
904}
905#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
906#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
907static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
908{
909 return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
910}
911
912#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
913#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
914#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
915static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
916{
917 return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
918}
919#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
920#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
921static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
922{
923 return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
924}
925
926#define REG_A3XX_UNKNOWN_20E8 0x000020e8
927
928#define REG_A3XX_UNKNOWN_20E9 0x000020e9
929
930#define REG_A3XX_UNKNOWN_20EA 0x000020ea
931
932#define REG_A3XX_UNKNOWN_20EB 0x000020eb
933
934#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
935#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
936#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
937static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
938{
939 return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
940}
941#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
942#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
943static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
944{
945 return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
946}
947#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
948#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
949static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
950{
951 return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
952}
953
954#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
955#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
956#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
957static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
958{
959 return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
960}
961
962#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
963#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
964#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
965static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
966{
967 return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
968}
969
970#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
971#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
972#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
973static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
974{
975 return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
976}
977#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
978#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
979static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
980{
981 return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
982}
983#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
984#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
985static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
986{
987 return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
988}
989#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
990#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
991static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
992{
993 return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
994}
995#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
996#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
997static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
998{
999 return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
1000}
1001
1002#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1003#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1004#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1005#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
1006#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
1007#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
1008static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
1009{
1010 return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
1011}
1012#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
1013#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
1014
1015#define REG_A3XX_UNKNOWN_2101 0x00002101
1016
1017#define REG_A3XX_RB_DEPTH_INFO 0x00002102
1018#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
1019#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
1020static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
1021{
1022 return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
1023}
1024#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
1025#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
1026static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
1027{
1028 return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
1029}
1030
1031#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
1032#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
1033#define A3XX_RB_DEPTH_PITCH__SHIFT 0
1034static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
1035{
1036 return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
1037}
1038
1039#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
1040#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
1041#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
1042#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
1043#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
1044static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
1045{
1046 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
1047}
1048#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
1049#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
1050static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
1051{
1052 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
1053}
1054#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
1055#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
1056static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
1057{
1058 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
1059}
1060#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
1061#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
1062static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
1063{
1064 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
1065}
1066#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
1067#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
1068static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
1069{
1070 return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
1071}
1072#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
1073#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
1074static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
1075{
1076 return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
1077}
1078#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
1079#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
1080static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
1081{
1082 return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
1083}
1084#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
1085#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
1086static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
1087{
1088 return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
1089}
1090
1091#define REG_A3XX_UNKNOWN_2105 0x00002105
1092
1093#define REG_A3XX_UNKNOWN_2106 0x00002106
1094
1095#define REG_A3XX_UNKNOWN_2107 0x00002107
1096
1097#define REG_A3XX_RB_STENCILREFMASK 0x00002108
1098#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
1099#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
1100static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
1101{
1102 return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
1103}
1104#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
1105#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
1106static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
1107{
1108 return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
1109}
1110#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
1111#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
1112static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
1113{
1114 return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
1115}
1116
1117#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
1118#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
1119#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
1120static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
1121{
1122 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
1123}
1124#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
1125#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
1126static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
1127{
1128 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
1129}
1130#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
1131#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
1132static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
1133{
1134 return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
1135}
1136
1137#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
1138#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
1139#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
1140static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
1141{
1142 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
1143}
1144#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
1145#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
1146static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
1147{
1148 return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
1149}
1150
1151#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
1152
1153#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
1154
1155#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
1156#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
1157#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
1158static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
1159{
1160 return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
1161}
1162#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
1163#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
1164static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
1165{
1166 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
1167}
1168#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
1169#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
1170static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
1171{
1172 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
1173}
1174#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1175
1176#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
1177
1178#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
1179#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
1180#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
1181static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
1182{
1183 return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
1184}
1185#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
1186#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
1187#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
1188#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
1189#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
1190#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
1191#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
1192#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
1193#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
1194
1195#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
1196#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
1197#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
1198static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
1199{
1200 return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
1201}
1202#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1203#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1204
1205#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
1206#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
1207#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
1208static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1209{
1210 return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
1211}
1212
1213#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
1214
1215#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
1216#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1217#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1218static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1219{
1220 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
1221}
1222#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1223#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1224static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1225{
1226 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1227}
1228#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1229#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1230static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1231{
1232 return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
1233}
1234
1235#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
1236#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
1237#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1238static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1239{
1240 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
1241}
1242#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
1243#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
1244static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
1245{
1246 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
1247}
1248#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1249#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1250static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1251{
1252 return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
1253}
1254
1255#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
1256#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1257#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1258static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1259{
1260 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
1261}
1262#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1263#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1264static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1265{
1266 return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
1267}
1268
1269#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
1270#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
1271#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
1272static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
1273{
1274 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
1275}
1276#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
1277#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
1278static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1279{
1280 return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
1281}
1282
1283#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
1284
1285#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
1286
1287#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
1288
1289#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
1290
1291#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
1292
1293#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
1294
1295#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
1296
1297#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
1298
1299#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
1300
1301#define REG_A3XX_VFD_CONTROL_0 0x00002240
1302#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
1303#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
1304static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
1305{
1306 return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
1307}
1308#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
1309#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
1310static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
1311{
1312 return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
1313}
1314#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
1315#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
1316static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
1317{
1318 return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
1319}
1320#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
1321#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
1322static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
1323{
1324 return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
1325}
1326
1327#define REG_A3XX_VFD_CONTROL_1 0x00002241
1328#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
1329#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
1330static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
1331{
1332 return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
1333}
1334#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
1335#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
1336static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
1337{
1338 return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
1339}
1340#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
1341#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
1342static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
1343{
1344 return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
1345}
1346
1347#define REG_A3XX_VFD_INDEX_MIN 0x00002242
1348
1349#define REG_A3XX_VFD_INDEX_MAX 0x00002243
1350
1351#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
1352
1353#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
1354
1355static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1356
1357static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
1358#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
1359#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
1360static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
1361{
1362 return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
1363}
1364#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
1365#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
1366static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
1367{
1368 return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
1369}
1370#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
1371#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
1372#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
1373static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
1374{
1375 return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
1376}
1377#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
1378#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
1379static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
1380{
1381 return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
1382}
1383
1384static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
1385
1386static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1387
1388static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
1389#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
1390#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
1391static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
1392{
1393 return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
1394}
1395#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
1396#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
1397#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
1398static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
1399{
1400 return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
1401}
1402#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
1403#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
1404static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1405{
1406 return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
1407}
1408#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1409#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1410static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1411{
1412 return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
1413}
1414#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
1415#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1416
1417#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
1418#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
1419#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
1420static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
1421{
1422 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
1423}
1424#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
1425#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
1426static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
1427{
1428 return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
1429}
1430
1431#define REG_A3XX_VPC_ATTR 0x00002280
1432#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
1433#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
1434static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
1435{
1436 return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
1437}
1438#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
1439#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1440static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
1441{
1442 return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
1443}
1444#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
1445#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
1446static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
1447{
1448 return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
1449}
1450
1451#define REG_A3XX_VPC_PACK 0x00002281
1452#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
1453#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
1454static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
1455{
1456 return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
1457}
1458#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
1459#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
1460static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
1461{
1462 return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
1463}
1464
1465static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1466
1467static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
1468
1469static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1470
1471static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1472
1473#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
1474
1475#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
1476
1477#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
1478#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
1479#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
1480#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
1481static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
1482{
1483 return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
1484}
1485#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
1486#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
1487static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
1488{
1489 return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
1490}
1491#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
1492#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
1493static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
1494{
1495 return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
1496}
1497
1498#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
1499#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
1500#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
1501static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1502{
1503 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
1504}
1505#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1506#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1507static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1508{
1509 return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1510}
1511#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
1512#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1513#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1514static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1515{
1516 return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1517}
1518#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1519#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1520static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1521{
1522 return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1523}
1524#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1525#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1526static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1527{
1528 return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1529}
1530#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1531#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
1532static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1533{
1534 return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
1535}
1536#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1537#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
1538#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
1539#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
1540static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
1541{
1542 return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
1543}
1544
1545#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
1546#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1547#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1548static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1549{
1550 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
1551}
1552#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1553#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1554static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1555{
1556 return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1557}
1558#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
1559#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
1560static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1561{
1562 return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1563}
1564
1565#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
1566#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
1567#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
1568static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
1569{
1570 return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
1571}
1572#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
1573#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
1574static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
1575{
1576 return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
1577}
1578#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
1579#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
1580static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
1581{
1582 return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
1583}
1584
1585static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1586
1587static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1588#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
1589#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
1590static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
1591{
1592 return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
1593}
1594#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1595#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
1596static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
1597{
1598 return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
1599}
1600#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
1601#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
1602static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
1603{
1604 return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
1605}
1606#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1607#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
1608static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
1609{
1610 return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
1611}
1612
1613static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1614
1615static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
1616#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1617#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
1618static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
1619{
1620 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
1621}
1622#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1623#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
1624static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
1625{
1626 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
1627}
1628#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1629#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
1630static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
1631{
1632 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
1633}
1634#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1635#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
1636static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
1637{
1638 return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
1639}
1640
1641#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
1642#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1643#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1644static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1645{
1646 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1647}
1648#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1649#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1650static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1651{
1652 return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1653}
1654
1655#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
1656
1657#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
1658
1659#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
1660
1661#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
1662
1663#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
1664#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1665#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1666static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1667{
1668 return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
1669}
1670
1671#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
1672#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
1673#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
1674static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1675{
1676 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
1677}
1678#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
1679#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
1680static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
1681{
1682 return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
1683}
1684#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
1685#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1686#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1687static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1688{
1689 return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1690}
1691#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1692#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1693static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1694{
1695 return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1696}
1697#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1698#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1699static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1700{
1701 return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1702}
1703#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1704#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
1705static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1706{
1707 return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
1708}
1709#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1710#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1711#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
1712#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
1713static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
1714{
1715 return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
1716}
1717
1718#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
1719#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
1720#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1721static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1722{
1723 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
1724}
1725#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
1726#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
1727static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
1728{
1729 return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
1730}
1731#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
1732#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
1733static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
1734{
1735 return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
1736}
1737#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
1738#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
1739static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
1740{
1741 return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
1742}
1743
1744#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
1745#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1746#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1747static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1748{
1749 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1750}
1751#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1752#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1753static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1754{
1755 return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1756}
1757
1758#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
1759
1760#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
1761
1762#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
1763
1764#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
1765
1766#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
1767
1768#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
1769
1770#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
1771
1772static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1773
1774static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1775#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
1776#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
1777static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
1778{
1779 return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
1780}
1781#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
1782
1783static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1784
1785static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
1786#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
1787#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
1788static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
1789{
1790 return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
1791}
1792
1793#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
1794#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
1795#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
1796static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
1797{
1798 return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
1799}
1800
1801#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
1802#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1803#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1804static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1805{
1806 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1807}
1808#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1809#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1810static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1811{
1812 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1813}
1814#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1815#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1816static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1817{
1818 return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
1819}
1820
1821#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
1822
1823#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
1824#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
1825#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
1826static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
1827{
1828 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
1829}
1830#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
1831#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
1832static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
1833{
1834 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
1835}
1836#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
1837#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
1838static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1839{
1840 return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
1841}
1842
1843#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
1844
1845#define REG_A3XX_VBIF_CLKON 0x00003001
1846
1847#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
1848
1849#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
1850
1851#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
1852
1853#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
1854
1855#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
1856
1857#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
1858
1859#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
1860
1861#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
1862
1863#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
1864
1865#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
1866
1867#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
1868
1869#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
1870
1871#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
1872
1873#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
1874
1875#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
1876
1877#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
1878
1879#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
1880
1881#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
1882
1883#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
1884#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1885#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1886static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1887{
1888 return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
1889}
1890#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
1891#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
1892static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1893{
1894 return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
1895}
1896
1897#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
1898
1899static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1900
1901static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
1902#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
1903#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
1904static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
1905{
1906 return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
1907}
1908#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
1909#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
1910static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
1911{
1912 return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
1913}
1914#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
1915#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
1916static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
1917{
1918 return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
1919}
1920#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
1921#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
1922static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
1923{
1924 return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
1925}
1926
1927static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
1928
1929static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
1930
1931#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
1932
1933#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
1934
1935#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
1936
1937#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
1938
1939#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
1940
1941#define REG_A3XX_UNKNOWN_0C81 0x00000c81
1942
1943#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
1944
1945#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
1946
1947#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
1948
1949#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
1950
1951static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1952
1953static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
1954
1955static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
1956
1957static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
1958
1959static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
1960
1961#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
1962
1963#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
1964
1965#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
1966
1967#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
1968#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
1969#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
1970static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
1971{
1972 return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
1973}
1974#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
1975#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
1976static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
1977{
1978 return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
1979}
1980
1981#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
1982
1983#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
1984
1985#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
1986
1987#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
1988
1989#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
1990
1991#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
1992
1993#define REG_A3XX_UNKNOWN_0E43 0x00000e43
1994
1995#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
1996
1997#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
1998
1999#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
2000
2001#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
2002
2003#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
2004
2005#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
2006
2007#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
2008
2009#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
2010
2011#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
2012
2013#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
2014
2015#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
2016
2017#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
2018
2019#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
2020
2021#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
2022#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
2023#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
2024static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
2025{
2026 return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
2027}
2028
2029#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
2030#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
2031#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
2032static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
2033{
2034 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
2035}
2036#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
2037#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
2038static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
2039{
2040 return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
2041}
2042#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
2043
2044#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
2045
2046#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
2047
2048#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
2049
2050#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
2051
2052#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
2053
2054#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
2055
2056#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
2057
2058#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
2059
2060#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
2061
2062#define REG_A3XX_UNKNOWN_0F03 0x00000f03
2063
2064#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
2065
2066#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
2067
2068#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
2069
2070#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
2071
2072#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
2073
2074#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
2075
2076#define REG_A3XX_TEX_SAMP_0 0x00000000
2077#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
2078#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
2079static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
2080{
2081 return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
2082}
2083#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
2084#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
2085static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
2086{
2087 return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
2088}
2089#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
2090#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
2091static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
2092{
2093 return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
2094}
2095#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
2096#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
2097static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
2098{
2099 return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
2100}
2101#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
2102#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
2103static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
2104{
2105 return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
2106}
2107#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
2108
2109#define REG_A3XX_TEX_SAMP_1 0x00000001
2110
2111#define REG_A3XX_TEX_CONST_0 0x00000000
2112#define A3XX_TEX_CONST_0_TILED 0x00000001
2113#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
2114#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
2115static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
2116{
2117 return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
2118}
2119#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
2120#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
2121static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
2122{
2123 return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
2124}
2125#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
2126#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
2127static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
2128{
2129 return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
2130}
2131#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
2132#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
2133static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
2134{
2135 return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
2136}
2137#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2138#define A3XX_TEX_CONST_0_FMT__SHIFT 22
2139static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
2140{
2141 return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
2142}
2143#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
2144#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
2145static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
2146{
2147 return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
2148}
2149
2150#define REG_A3XX_TEX_CONST_1 0x00000001
2151#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
2152#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
2153static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
2154{
2155 return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
2156}
2157#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
2158#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
2159static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
2160{
2161 return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
2162}
2163#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
2164#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
2165static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
2166{
2167 return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
2168}
2169
2170#define REG_A3XX_TEX_CONST_2 0x00000002
2171#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
2172#define A3XX_TEX_CONST_2_INDX__SHIFT 0
2173static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
2174{
2175 return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
2176}
2177#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
2178#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
2179static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
2180{
2181 return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
2182}
2183#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
2184#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
2185static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2186{
2187 return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
2188}
2189
2190#define REG_A3XX_TEX_CONST_3 0x00000003
2191
2192
2193#endif /* A3XX_XML */
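All of the field helpers in this header follow one pattern: shift the value into its field and mask off any overflow, so a full register word is built by OR-ing the per-field helpers together before it is written. A minimal sketch of that usage (illustrative only, not part of this patch: the helper name and field values are hypothetical, and gpu_write() is the MSM register accessor used by the driver code below):

    /* Pack the front-facing stencil ref/mask register from its fields
     * and write it; the values chosen here are purely illustrative. */
    static void example_write_stencilrefmask(struct msm_gpu *gpu)
    {
    	uint32_t val = A3XX_RB_STENCILREFMASK_STENCILREF(0x00) |
    			A3XX_RB_STENCILREFMASK_STENCILMASK(0xff) |
    			A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(0xff);

    	gpu_write(gpu, REG_A3XX_RB_STENCILREFMASK, val);
    }
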
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 000000000000..035bd13dc8bd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "a3xx_gpu.h"
19
20#define A3XX_INT0_MASK \
21 (A3XX_INT0_RBBM_AHB_ERROR | \
22 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
23 A3XX_INT0_CP_T0_PACKET_IN_IB | \
24 A3XX_INT0_CP_OPCODE_ERROR | \
25 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
26 A3XX_INT0_CP_HW_FAULT | \
27 A3XX_INT0_CP_IB1_INT | \
28 A3XX_INT0_CP_IB2_INT | \
29 A3XX_INT0_CP_RB_INT | \
30 A3XX_INT0_CP_REG_PROTECT_FAULT | \
31 A3XX_INT0_CP_AHB_ERROR_HALT | \
32 A3XX_INT0_UCHE_OOB_ACCESS)
33
34static struct platform_device *a3xx_pdev;
35
36static void a3xx_me_init(struct msm_gpu *gpu)
37{
38 struct msm_ringbuffer *ring = gpu->rb;
39
40 OUT_PKT3(ring, CP_ME_INIT, 17);
41 OUT_RING(ring, 0x000003f7);
42 OUT_RING(ring, 0x00000000);
43 OUT_RING(ring, 0x00000000);
44 OUT_RING(ring, 0x00000000);
45 OUT_RING(ring, 0x00000080);
46 OUT_RING(ring, 0x00000100);
47 OUT_RING(ring, 0x00000180);
48 OUT_RING(ring, 0x00006600);
49 OUT_RING(ring, 0x00000150);
50 OUT_RING(ring, 0x0000014e);
51 OUT_RING(ring, 0x00000154);
52 OUT_RING(ring, 0x00000001);
53 OUT_RING(ring, 0x00000000);
54 OUT_RING(ring, 0x00000000);
55 OUT_RING(ring, 0x00000000);
56 OUT_RING(ring, 0x00000000);
57 OUT_RING(ring, 0x00000000);
58
59 gpu->funcs->flush(gpu);
60 gpu->funcs->idle(gpu);
61}
62
63static int a3xx_hw_init(struct msm_gpu *gpu)
64{
65 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
66 uint32_t *ptr, len;
67 int i, ret;
68
69 DBG("%s", gpu->name);
70
71 if (adreno_is_a305(adreno_gpu)) {
72 /* Set up 16 deep read/write request queues: */
73 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
74 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
75 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
76 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
77 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
78 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
79 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
80 /* Enable WR-REQ: */
81 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
82 /* Set up round robin arbitration between both AXI ports: */
83 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
84 /* Set up AOOO: */
85 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
86 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
87
88 } else if (adreno_is_a320(adreno_gpu)) {
89 /* Set up 16 deep read/write request queues: */
90 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
91 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
92 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
93 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
94 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
95 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
96 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
97 /* Enable WR-REQ: */
98 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
99 /* Set up round robin arbitration between both AXI ports: */
100 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
101 /* Set up AOOO: */
102 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
103 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
104 /* Enable 1K sort: */
105 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
106 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
107
108 } else if (adreno_is_a330(adreno_gpu)) {
109 /* Set up 16 deep read/write request queues: */
110 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
111 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
112 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
113 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
114 gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
115 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
116 gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
117 /* Enable WR-REQ: */
118 gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
119 /* Set up round robin arbitration between both AXI ports: */
120 gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
121 /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
122 gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
123 /* Set up AOOO: */
124 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
125 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
126 /* Enable 1K sort: */
127 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
128 gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
129	/* Disable VBIF clock gating. This is to allow AXI to run at a
130	 * higher frequency than the GPU:
131	 */
132 gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
133
134 } else {
135 BUG();
136 }
137
138 /* Make all blocks contribute to the GPU BUSY perf counter: */
139 gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
140
141	/* Tune the hysteresis counters for SP and CP idle detection: */
142 gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
143 gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
144
145 /* Enable the RBBM error reporting bits. This lets us get
146 * useful information on failure:
147 */
148 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
149
150 /* Enable AHB error reporting: */
151 gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
152
153 /* Turn on the power counters: */
154 gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
155
156 /* Turn on hang detection - this spews a lot of useful information
157 * into the RBBM registers on a hang:
158 */
159 gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
160
161 /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
162 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
163
164 /* Enable Clock gating: */
165 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
166
167 /* Set the OCMEM base address for A330 */
168//TODO:
169// if (adreno_is_a330(adreno_gpu)) {
170// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
171// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
172// }
173
174 /* Turn on performance counters: */
175 gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
176
177 /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS
178 * we will use this to augment our hang detection:
179 */
180 gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
181 SP_FS_FULL_ALU_INSTRUCTIONS);
182
183 gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
184
185 ret = adreno_hw_init(gpu);
186 if (ret)
187 return ret;
188
189 /* setup access protection: */
190 gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
191
192 /* RBBM registers */
193 gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
194 gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
195 gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
196 gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
197 gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
198 gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
199
200 /* CP registers */
201 gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
202 gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
203 gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
204 gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
205 gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
206
207 /* RB registers */
208 gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
209
210 /* VBIF registers */
211 gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
212
213 /* NOTE: PM4/micro-engine firmware registers look to be the same
214 * for a2xx and a3xx.. we could possibly push that part down to
215 * adreno_gpu base class. Or push both PM4 and PFP but
216 * parameterize the pfp ucode addr/data registers..
217 */
218
219 /* Load PM4: */
220 ptr = (uint32_t *)(adreno_gpu->pm4->data);
221 len = adreno_gpu->pm4->size / 4;
222 DBG("loading PM4 ucode version: %u", ptr[0]);
223
224 gpu_write(gpu, REG_AXXX_CP_DEBUG,
225 AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
226 AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
227 gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
228 for (i = 1; i < len; i++)
229 gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
230
231 /* Load PFP: */
232 ptr = (uint32_t *)(adreno_gpu->pfp->data);
233 len = adreno_gpu->pfp->size / 4;
234 DBG("loading PFP ucode version: %u", ptr[0]);
235
236 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
237 for (i = 1; i < len; i++)
238 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
239
240 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
241 if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
242 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
243 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
244 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
245 AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
246
247
248 /* clear ME_HALT to start micro engine */
249 gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
250
251 a3xx_me_init(gpu);
252
253 return 0;
254}
255
256static void a3xx_destroy(struct msm_gpu *gpu)
257{
258 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
259 struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
260
261 DBG("%s", gpu->name);
262
263 adreno_gpu_cleanup(adreno_gpu);
264 put_device(&a3xx_gpu->pdev->dev);
265 kfree(a3xx_gpu);
266}
267
268static void a3xx_idle(struct msm_gpu *gpu)
269{
270 unsigned long t;
271
272 /* wait for ringbuffer to drain: */
273 adreno_idle(gpu);
274
275 t = jiffies + ADRENO_IDLE_TIMEOUT;
276
277 /* then wait for GPU to finish: */
278 do {
279 uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
280 if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
281 return;
282	} while (time_before(jiffies, t));
283
284 DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
285
286 /* TODO maybe we need to reset GPU here to recover from hang? */
287}
288
289static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
290{
291 uint32_t status;
292
293 status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
294 DBG("%s: %08x", gpu->name, status);
295
296 // TODO
297
298 gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
299
300 msm_gpu_retire(gpu);
301
302 return IRQ_HANDLED;
303}
304
305#ifdef CONFIG_DEBUG_FS
306static const unsigned int a3xx_registers[] = {
307 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
308 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
309 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
310 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
311 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
312 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
313 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
314 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
315 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
316 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
317 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
318 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
319 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
320 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
321 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
322 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
323 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
324 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
325 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
326 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
327 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
328 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
329 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
330 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
331 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
332 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
333 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
334 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
335 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
336 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
337 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
338 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
339 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
340 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
341 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
342 0x303c, 0x303c, 0x305e, 0x305f,
343};
344
345static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
346{
347 int i;
348
349 adreno_show(gpu, m);
350 seq_printf(m, "status: %08x\n",
351 gpu_read(gpu, REG_A3XX_RBBM_STATUS));
352
353 /* dump these out in a form that can be parsed by demsm: */
354 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
355 for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
356 uint32_t start = a3xx_registers[i];
357 uint32_t end = a3xx_registers[i+1];
358 uint32_t addr;
359
360 for (addr = start; addr <= end; addr++) {
361 uint32_t val = gpu_read(gpu, addr);
362 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
363 }
364 }
365}
366#endif
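
The a3xx_show() dump walks (start, end) pairs of register word offsets and prints each one as a byte offset (addr << 2), the form the demsm tool parses. A minimal standalone sketch of the same arithmetic (hypothetical range and a placeholder value, plain userspace C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t start = 0x0020, end = 0x0027;	/* one (start, end) pair */
		uint32_t addr;

		for (addr = start; addr <= end; addr++)
			/* word offset 0x0020 prints as byte offset 0x00000080, etc. */
			printf("IO:R %08x %08x\n", addr << 2, 0u);
		return 0;
	}
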
367
368static const struct adreno_gpu_funcs funcs = {
369 .base = {
370 .get_param = adreno_get_param,
371 .hw_init = a3xx_hw_init,
372 .pm_suspend = msm_gpu_pm_suspend,
373 .pm_resume = msm_gpu_pm_resume,
374 .recover = adreno_recover,
375 .last_fence = adreno_last_fence,
376 .submit = adreno_submit,
377 .flush = adreno_flush,
378 .idle = a3xx_idle,
379 .irq = a3xx_irq,
380 .destroy = a3xx_destroy,
381#ifdef CONFIG_DEBUG_FS
382 .show = a3xx_show,
383#endif
384 },
385};
386
387struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
388{
389 struct a3xx_gpu *a3xx_gpu = NULL;
390 struct msm_gpu *gpu;
391 struct platform_device *pdev = a3xx_pdev;
392 struct adreno_platform_config *config;
393 int ret;
394
395 if (!pdev) {
396 dev_err(dev->dev, "no a3xx device\n");
397 ret = -ENXIO;
398 goto fail;
399 }
400
401 config = pdev->dev.platform_data;
402
403 a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
404 if (!a3xx_gpu) {
405 ret = -ENOMEM;
406 goto fail;
407 }
408
409 gpu = &a3xx_gpu->base.base;
410
411 get_device(&pdev->dev);
412 a3xx_gpu->pdev = pdev;
413
414 gpu->fast_rate = config->fast_rate;
415 gpu->slow_rate = config->slow_rate;
416 gpu->bus_freq = config->bus_freq;
417
418 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
419 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
420
421 ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
422 &funcs, config->rev);
423 if (ret)
424 goto fail;
425
426 return &a3xx_gpu->base.base;
427
428fail:
429 if (a3xx_gpu)
430 a3xx_destroy(&a3xx_gpu->base.base);
431
432 return ERR_PTR(ret);
433}
434
435/*
436 * The a3xx device:
437 */
438
439static int a3xx_probe(struct platform_device *pdev)
440{
441 static struct adreno_platform_config config = {};
442#ifdef CONFIG_OF
443 /* TODO */
444#else
445 uint32_t version = socinfo_get_version();
446 if (cpu_is_apq8064ab()) {
447 config.fast_rate = 450000000;
448 config.slow_rate = 27000000;
449 config.bus_freq = 4;
450 config.rev = ADRENO_REV(3, 2, 1, 0);
451 } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
452 config.fast_rate = 400000000;
453 config.slow_rate = 27000000;
454 config.bus_freq = 4;
455
456 if (SOCINFO_VERSION_MAJOR(version) == 2)
457 config.rev = ADRENO_REV(3, 2, 0, 2);
458 else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
459 (SOCINFO_VERSION_MINOR(version) == 1))
460 config.rev = ADRENO_REV(3, 2, 0, 1);
461 else
462 config.rev = ADRENO_REV(3, 2, 0, 0);
463
464 } else if (cpu_is_msm8930()) {
465 config.fast_rate = 400000000;
466 config.slow_rate = 27000000;
467 config.bus_freq = 3;
468
469 if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
470 (SOCINFO_VERSION_MINOR(version) == 2))
471 config.rev = ADRENO_REV(3, 0, 5, 2);
472 else
473 config.rev = ADRENO_REV(3, 0, 5, 0);
474
475 }
476#endif
477 pdev->dev.platform_data = &config;
478 a3xx_pdev = pdev;
479 return 0;
480}
481
482static int a3xx_remove(struct platform_device *pdev)
483{
484 a3xx_pdev = NULL;
485 return 0;
486}
487
488static struct platform_driver a3xx_driver = {
489 .probe = a3xx_probe,
490 .remove = a3xx_remove,
491 .driver.name = "kgsl-3d0",
492};
493
494void __init a3xx_register(void)
495{
496 platform_driver_register(&a3xx_driver);
497}
498
499void __exit a3xx_unregister(void)
500{
501 platform_driver_unregister(&a3xx_driver);
502}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 000000000000..32c398c2d00a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __A3XX_GPU_H__
19#define __A3XX_GPU_H__
20
21#include "adreno_gpu.h"
22#include "a3xx.xml.h"
23
24struct a3xx_gpu {
25 struct adreno_gpu base;
26 struct platform_device *pdev;
27};
28#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
29
30#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 000000000000..61979d458ac0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
1#ifndef ADRENO_COMMON_XML
2#define ADRENO_COMMON_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum adreno_pa_su_sc_draw {
44 PC_DRAW_POINTS = 0,
45 PC_DRAW_LINES = 1,
46 PC_DRAW_TRIANGLES = 2,
47};
48
49enum adreno_compare_func {
50 FUNC_NEVER = 0,
51 FUNC_LESS = 1,
52 FUNC_EQUAL = 2,
53 FUNC_LEQUAL = 3,
54 FUNC_GREATER = 4,
55 FUNC_NOTEQUAL = 5,
56 FUNC_GEQUAL = 6,
57 FUNC_ALWAYS = 7,
58};
59
60enum adreno_stencil_op {
61 STENCIL_KEEP = 0,
62 STENCIL_ZERO = 1,
63 STENCIL_REPLACE = 2,
64 STENCIL_INCR_CLAMP = 3,
65 STENCIL_DECR_CLAMP = 4,
66 STENCIL_INVERT = 5,
67 STENCIL_INCR_WRAP = 6,
68 STENCIL_DECR_WRAP = 7,
69};
70
71enum adreno_rb_blend_factor {
72 FACTOR_ZERO = 0,
73 FACTOR_ONE = 1,
74 FACTOR_SRC_COLOR = 4,
75 FACTOR_ONE_MINUS_SRC_COLOR = 5,
76 FACTOR_SRC_ALPHA = 6,
77 FACTOR_ONE_MINUS_SRC_ALPHA = 7,
78 FACTOR_DST_COLOR = 8,
79 FACTOR_ONE_MINUS_DST_COLOR = 9,
80 FACTOR_DST_ALPHA = 10,
81 FACTOR_ONE_MINUS_DST_ALPHA = 11,
82 FACTOR_CONSTANT_COLOR = 12,
83 FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
84 FACTOR_CONSTANT_ALPHA = 14,
85 FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
86 FACTOR_SRC_ALPHA_SATURATE = 16,
87};
88
89enum adreno_rb_blend_opcode {
90 BLEND_DST_PLUS_SRC = 0,
91 BLEND_SRC_MINUS_DST = 1,
92 BLEND_MIN_DST_SRC = 2,
93 BLEND_MAX_DST_SRC = 3,
94 BLEND_DST_MINUS_SRC = 4,
95 BLEND_DST_PLUS_SRC_BIAS = 5,
96};
97
98enum adreno_rb_surface_endian {
99 ENDIAN_NONE = 0,
100 ENDIAN_8IN16 = 1,
101 ENDIAN_8IN32 = 2,
102 ENDIAN_16IN32 = 3,
103 ENDIAN_8IN64 = 4,
104 ENDIAN_8IN128 = 5,
105};
106
107enum adreno_rb_dither_mode {
108 DITHER_DISABLE = 0,
109 DITHER_ALWAYS = 1,
110 DITHER_IF_ALPHA_OFF = 2,
111};
112
113enum adreno_rb_depth_format {
114 DEPTHX_16 = 0,
115 DEPTHX_24_8 = 1,
116};
117
118enum adreno_mmu_clnt_beh {
119 BEH_NEVR = 0,
120 BEH_TRAN_RNG = 1,
121 BEH_TRAN_FLT = 2,
122};
123
124#define REG_AXXX_MH_MMU_CONFIG 0x00000040
125#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
126#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
127#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
128#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
129static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
130{
131 return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
132}
133#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
134#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
135static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
136{
137 return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
138}
139#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
140#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
141static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
142{
143 return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
144}
145#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
146#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
147static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
148{
149 return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
150}
151#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
152#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
153static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
154{
155 return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
156}
157#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
158#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
159static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
160{
161 return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
162}
163#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
164#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
165static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
166{
167 return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
168}
169#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
170#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
171static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
172{
173 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
174}
175#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
176#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
177static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
178{
179 return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
180}
181#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
182#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
183static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
184{
185 return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
186}
187#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
188#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
189static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
190{
191 return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
192}
193
194#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
195
196#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
197
198#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
199
200#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
201
202#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
203
204#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
205
206#define REG_AXXX_MH_MMU_MPU_END 0x00000047
207
208#define REG_AXXX_CP_RB_BASE 0x000001c0
209
210#define REG_AXXX_CP_RB_CNTL 0x000001c1
211#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
212#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
213static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
214{
215 return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
216}
217#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
218#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
219static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
220{
221 return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
222}
223#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
224#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
225static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
226{
227 return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
228}
229#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
230#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
231#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
232
233#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
234#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
235#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
236static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
237{
238 return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
239}
240#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
241#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
242static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
243{
244 return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
245}
246
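
Note the ((val >> 2) << 2) in AXXX_CP_RB_RPTR_ADDR_ADDR(): the rptr address is presumably dword-aligned, so bits [1:0] of the register are left free for the SWAP field. A hedged sketch of composing the full register value (the iova below is made up for illustration):

	static inline uint32_t pack_rb_rptr_addr(uint32_t rptr_iova)
	{
		/* e.g. rptr_iova == 0x40001000 packs to 0x40001000 with SWAP == 0 */
		return AXXX_CP_RB_RPTR_ADDR_ADDR(rptr_iova) |
			AXXX_CP_RB_RPTR_ADDR_SWAP(0);
	}
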
247#define REG_AXXX_CP_RB_RPTR 0x000001c4
248
249#define REG_AXXX_CP_RB_WPTR 0x000001c5
250
251#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
252
253#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
254
255#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
256
257#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
258#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
259#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
260static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
261{
262 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
263}
264#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
265#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
266static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
267{
268 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
269}
270#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
271#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
272static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
273{
274 return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
275}
276
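
These helpers are what a3xx_hw_init() above uses to program the CP ROQ thresholds; the packing works out as follows (same values as the driver writes):

	uint32_t thresholds =
		AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |	/* 0x00000002 */
		AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |	/* 0x00000600 */
		AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14);	/* 0x000e0000 */
	/* thresholds == 0x000e0602 */
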
277#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
278
279#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
280#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
281#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
282static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
283{
284 return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
285}
286#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
287#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
288static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
289{
290 return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
291}
292#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
293#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
294static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
295{
296 return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
297}
298
299#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
300#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
301#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
302static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
303{
304 return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
305}
306
307#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
308#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
309#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
310static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
311{
312 return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
313}
314
315#define REG_AXXX_SCRATCH_UMSK 0x000001dc
316#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
317#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
318static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
319{
320 return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
321}
322#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
323#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
324static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
325{
326 return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
327}
328
329#define REG_AXXX_SCRATCH_ADDR 0x000001dd
330
331#define REG_AXXX_CP_ME_RDADDR 0x000001ea
332
333#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
334
335#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
336
337#define REG_AXXX_CP_INT_CNTL 0x000001f2
338
339#define REG_AXXX_CP_INT_STATUS 0x000001f3
340
341#define REG_AXXX_CP_INT_ACK 0x000001f4
342
343#define REG_AXXX_CP_ME_CNTL 0x000001f6
344
345#define REG_AXXX_CP_ME_STATUS 0x000001f7
346
347#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
348
349#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
350
351#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
352
353#define REG_AXXX_CP_DEBUG 0x000001fc
354#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
355#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
356#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
357#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
358#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
359#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
360#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
361#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
362
363#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
364#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
365#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
366static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
367{
368 return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
369}
370#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
371#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
372static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
373{
374 return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
375}
376
377#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
378#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
379#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
380static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
381{
382 return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
383}
384#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
385#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
386static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
387{
388 return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
389}
390
391#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
392#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
393#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
394static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
395{
396 return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
397}
398#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
399#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
400static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
401{
402 return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
403}
404
405#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
406
407#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
408
409#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
410
411#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
412
413#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
414
415#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
416
417#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
418
419#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
420
421#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
422
423#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
424
425#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
426
427#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
428
429#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
430
431
432#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000000..a60584763b61
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "adreno_gpu.h"
19#include "msm_gem.h"
20
21struct adreno_info {
22 struct adreno_rev rev;
23 uint32_t revn;
24 const char *name;
25 const char *pm4fw, *pfpfw;
26 uint32_t gmem;
27};
28
29#define ANY_ID 0xff
30
31static const struct adreno_info gpulist[] = {
32 {
33 .rev = ADRENO_REV(3, 0, 5, ANY_ID),
34 .revn = 305,
35 .name = "A305",
36 .pm4fw = "a300_pm4.fw",
37 .pfpfw = "a300_pfp.fw",
38 .gmem = SZ_256K,
39 }, {
40 .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
41 .revn = 320,
42 .name = "A320",
43 .pm4fw = "a300_pm4.fw",
44 .pfpfw = "a300_pfp.fw",
45 .gmem = SZ_512K,
46 }, {
47 .rev = ADRENO_REV(3, 3, 0, 0),
48 .revn = 330,
49 .name = "A330",
50 .pm4fw = "a330_pm4.fw",
51 .pfpfw = "a330_pfp.fw",
52 .gmem = SZ_1M,
53 },
54};
55
56#define RB_SIZE SZ_32K
57#define RB_BLKSIZE 16
58
59int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
60{
61 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
62
63 switch (param) {
64 case MSM_PARAM_GPU_ID:
65 *value = adreno_gpu->info->revn;
66 return 0;
67 case MSM_PARAM_GMEM_SIZE:
68 *value = adreno_gpu->info->gmem;
69 return 0;
70 default:
71 DBG("%s: invalid param: %u", gpu->name, param);
72 return -EINVAL;
73 }
74}
75
76#define rbmemptr(adreno_gpu, member) \
77 ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
78
79int adreno_hw_init(struct msm_gpu *gpu)
80{
81 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
82
83 DBG("%s", gpu->name);
84
85 /* Setup REG_CP_RB_CNTL: */
86 gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
87 /* size is log2(quad-words): */
88 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
89 AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
90
91 /* Setup ringbuffer address: */
92 gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
93 gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
94
95 /* Setup scratch/timestamp: */
96 gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
97
98 gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
99
100 return 0;
101}
102
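
Two bits of arithmetic in adreno_hw_init() are worth spelling out: AXXX_CP_RB_CNTL_BUFSZ takes the log2 of the ring size in quad-words, and rbmemptr() turns a memptrs struct member into its GPU-visible address. A sketch with the RB_SIZE this driver uses (the iova value below is made up for illustration):

	/* BUFSZ: a 32 KiB ring is 32768 / 8 = 4096 quad-words, so ilog2() == 12 */
	uint32_t bufsz = ilog2(SZ_32K / 8);

	/* rbmemptr(): base iova of the memptrs bo plus the member offset.
	 * With a hypothetical memptrs_iova of 0x1000:
	 *   rbmemptr(adreno_gpu, rptr)  == 0x1000 + 0 == 0x1000
	 *   rbmemptr(adreno_gpu, fence) == 0x1000 + 8 == 0x1008
	 */
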
103static uint32_t get_wptr(struct msm_ringbuffer *ring)
104{
105 return ring->cur - ring->start;
106}
107
108uint32_t adreno_last_fence(struct msm_gpu *gpu)
109{
110 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
111 return adreno_gpu->memptrs->fence;
112}
113
114void adreno_recover(struct msm_gpu *gpu)
115{
116 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
117 struct drm_device *dev = gpu->dev;
118 int ret;
119
120 gpu->funcs->pm_suspend(gpu);
121
122 /* reset ringbuffer: */
123 gpu->rb->cur = gpu->rb->start;
124
125 /* reset completed fence seqno, just discard anything pending: */
126 adreno_gpu->memptrs->fence = gpu->submitted_fence;
127
128 gpu->funcs->pm_resume(gpu);
129 ret = gpu->funcs->hw_init(gpu);
130 if (ret) {
131 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
132 /* hmm, oh well? */
133 }
134}
135
136int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
137 struct msm_file_private *ctx)
138{
139 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140 struct msm_drm_private *priv = gpu->dev->dev_private;
141 struct msm_ringbuffer *ring = gpu->rb;
142 unsigned i, ibs = 0;
143
144 for (i = 0; i < submit->nr_cmds; i++) {
145 switch (submit->cmd[i].type) {
146 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
147 /* ignore IB-targets */
148 break;
149 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
150 /* ignore if there has not been a ctx switch: */
151 if (priv->lastctx == ctx)
152 break;
153 case MSM_SUBMIT_CMD_BUF:
154 OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
155 OUT_RING(ring, submit->cmd[i].iova);
156 OUT_RING(ring, submit->cmd[i].size);
157 ibs++;
158 break;
159 }
160 }
161
162	/* on a320, at least, we seem to need to pad things out to an
163	 * even number of qwords to avoid an issue with the CP hanging
164	 * on wrap-around:
165	 */
166 if (ibs % 2)
167 OUT_PKT2(ring);
168
169 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
170 OUT_RING(ring, submit->fence);
171
172 if (adreno_is_a3xx(adreno_gpu)) {
173 /* Flush HLSQ lazy updates to make sure there is nothing
174 * pending for indirect loads after the timestamp has
175 * passed:
176 */
177 OUT_PKT3(ring, CP_EVENT_WRITE, 1);
178 OUT_RING(ring, HLSQ_FLUSH);
179
180 OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
181 OUT_RING(ring, 0x00000000);
182 }
183
184 OUT_PKT3(ring, CP_EVENT_WRITE, 3);
185 OUT_RING(ring, CACHE_FLUSH_TS);
186 OUT_RING(ring, rbmemptr(adreno_gpu, fence));
187 OUT_RING(ring, submit->fence);
188
189 /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
190 OUT_PKT3(ring, CP_INTERRUPT, 1);
191 OUT_RING(ring, 0x80000000);
192
193#if 0
194 if (adreno_is_a3xx(adreno_gpu)) {
195 /* Dummy set-constant to trigger context rollover */
196 OUT_PKT3(ring, CP_SET_CONSTANT, 2);
197 OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
198 OUT_RING(ring, 0x00000000);
199 }
200#endif
201
202 gpu->funcs->flush(gpu);
203
204 return 0;
205}
206
207void adreno_flush(struct msm_gpu *gpu)
208{
209 uint32_t wptr = get_wptr(gpu->rb);
210
211 /* ensure writes to ringbuffer have hit system memory: */
212 mb();
213
214 gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
215}
216
217void adreno_idle(struct msm_gpu *gpu)
218{
219 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
220 uint32_t rptr, wptr = get_wptr(gpu->rb);
221 unsigned long t;
222
223 t = jiffies + ADRENO_IDLE_TIMEOUT;
224
225	/* wait for the CP to drain the ringbuffer: */
226 do {
227 rptr = adreno_gpu->memptrs->rptr;
228 if (rptr == wptr)
229 return;
230	} while (time_before(jiffies, t));
231
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
233
234 /* TODO maybe we need to reset GPU here to recover from hang? */
235}
236
237#ifdef CONFIG_DEBUG_FS
238void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
239{
240 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
241
242 seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
243 adreno_gpu->info->revn, adreno_gpu->rev.core,
244 adreno_gpu->rev.major, adreno_gpu->rev.minor,
245 adreno_gpu->rev.patchid);
246
247 seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
248 gpu->submitted_fence);
249 seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
250 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
251 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
252}
253#endif
254
255void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords;
259 do {
260 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size;
264	} while (freedwords < ndwords);
265}
266
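
The free-space computation in adreno_wait_ring() is standard ring-buffer arithmetic: with rptr and wptr both counted in dwords, (rptr + (size - 1) - wptr) % size keeps one dword unused so that rptr == wptr unambiguously means "empty". A worked example with made-up pointer values and the 32 KiB RB_SIZE above:

	uint32_t size = SZ_32K / 4;		/* 8192 dwords */
	uint32_t wptr = 10, rptr = 8100;	/* wptr recently wrapped */
	uint32_t freedwords = (rptr + (size - 1) - wptr) % size;
	/* (8100 + 8191 - 10) % 8192 == 8089 dwords free */
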
267static const char *iommu_ports[] = {
268 "gfx3d_user", "gfx3d_priv",
269 "gfx3d1_user", "gfx3d1_priv",
270};
271
272static inline bool _rev_match(uint8_t entry, uint8_t id)
273{
274 return (entry == ANY_ID) || (entry == id);
275}
276
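
ANY_ID entries in gpulist act as wildcards, so _rev_match() lets one table row cover a whole family of revisions. For example, a probed ADRENO_REV(3, 2, 0, 2) (the apq8064 v2 case in a3xx_probe() above) lands on the A320 row; a sketch of the comparison, not part of the patch:

	struct adreno_rev probed = ADRENO_REV(3, 2, 0, 2);
	struct adreno_rev entry  = ADRENO_REV(3, 2, ANY_ID, ANY_ID);	/* A320 row */

	bool match = _rev_match(entry.core, probed.core) &&		/* 3 == 3 */
			_rev_match(entry.major, probed.major) &&	/* 2 == 2 */
			_rev_match(entry.minor, probed.minor) &&	/* ANY_ID */
			_rev_match(entry.patchid, probed.patchid);	/* ANY_ID */
	/* match == true, so gpu->info points at the A320 entry */
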
277int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
278 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
279 struct adreno_rev rev)
280{
281 int i, ret;
282
283 /* identify gpu: */
284 for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
285 const struct adreno_info *info = &gpulist[i];
286 if (_rev_match(info->rev.core, rev.core) &&
287 _rev_match(info->rev.major, rev.major) &&
288 _rev_match(info->rev.minor, rev.minor) &&
289 _rev_match(info->rev.patchid, rev.patchid)) {
290 gpu->info = info;
291 gpu->revn = info->revn;
292 break;
293 }
294 }
295
296 if (i == ARRAY_SIZE(gpulist)) {
297 dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
298 rev.core, rev.major, rev.minor, rev.patchid);
299 return -ENXIO;
300 }
301
302 DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
303 rev.core, rev.major, rev.minor, rev.patchid);
304
305 gpu->funcs = funcs;
306 gpu->rev = rev;
307
308 ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
309 if (ret) {
310 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
311 gpu->info->pm4fw, ret);
312 return ret;
313 }
314
315 ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
316 if (ret) {
317 dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
318 gpu->info->pfpfw, ret);
319 return ret;
320 }
321
322 ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
323 gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
324 RB_SIZE);
325 if (ret)
326 return ret;
327
328 ret = msm_iommu_attach(drm, gpu->base.iommu,
329 iommu_ports, ARRAY_SIZE(iommu_ports));
330 if (ret)
331 return ret;
332
333 gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
334 MSM_BO_UNCACHED);
335 if (IS_ERR(gpu->memptrs_bo)) {
336 ret = PTR_ERR(gpu->memptrs_bo);
337 gpu->memptrs_bo = NULL;
338 dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
339 return ret;
340 }
341
342 gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
343 if (!gpu->memptrs) {
344 dev_err(drm->dev, "could not vmap memptrs\n");
345 return -ENOMEM;
346 }
347
348 ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
349 &gpu->memptrs_iova);
350 if (ret) {
351 dev_err(drm->dev, "could not map memptrs: %d\n", ret);
352 return ret;
353 }
354
355 return 0;
356}
357
358void adreno_gpu_cleanup(struct adreno_gpu *gpu)
359{
360 if (gpu->memptrs_bo) {
361 if (gpu->memptrs_iova)
362 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
363 drm_gem_object_unreference(gpu->memptrs_bo);
364 }
365 if (gpu->pm4)
366 release_firmware(gpu->pm4);
367 if (gpu->pfp)
368 release_firmware(gpu->pfp);
369 msm_gpu_cleanup(&gpu->base);
370}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 000000000000..f73abfba7c22
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ADRENO_GPU_H__
19#define __ADRENO_GPU_H__
20
21#include <linux/firmware.h>
22
23#include "msm_gpu.h"
24
25#include "adreno_common.xml.h"
26#include "adreno_pm4.xml.h"
27
28struct adreno_rev {
29 uint8_t core;
30 uint8_t major;
31 uint8_t minor;
32 uint8_t patchid;
33};
34
35#define ADRENO_REV(core, major, minor, patchid) \
36 ((struct adreno_rev){ core, major, minor, patchid })
37
38struct adreno_gpu_funcs {
39 struct msm_gpu_funcs base;
40};
41
42struct adreno_info;
43
44struct adreno_rbmemptrs {
45 volatile uint32_t rptr;
46 volatile uint32_t wptr;
47 volatile uint32_t fence;
48};
49
50struct adreno_gpu {
51 struct msm_gpu base;
52 struct adreno_rev rev;
53 const struct adreno_info *info;
54 uint32_t revn; /* numeric revision name */
55 const struct adreno_gpu_funcs *funcs;
56
57 /* firmware: */
58 const struct firmware *pm4, *pfp;
59
60 /* ringbuffer rptr/wptr: */
61 // TODO should this be in msm_ringbuffer? I think it would be
62 // different for z180..
63 struct adreno_rbmemptrs *memptrs;
64 struct drm_gem_object *memptrs_bo;
65 uint32_t memptrs_iova;
66};
67#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
68
69/* platform config data (ie. from DT, or pdata) */
70struct adreno_platform_config {
71 struct adreno_rev rev;
72 uint32_t fast_rate, slow_rate, bus_freq;
73};
74
75#define ADRENO_IDLE_TIMEOUT (20 * 1000)
76
77static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
78{
79 return (gpu->revn >= 300) && (gpu->revn < 400);
80}
81
82static inline bool adreno_is_a305(struct adreno_gpu *gpu)
83{
84 return gpu->revn == 305;
85}
86
87static inline bool adreno_is_a320(struct adreno_gpu *gpu)
88{
89 return gpu->revn == 320;
90}
91
92static inline bool adreno_is_a330(struct adreno_gpu *gpu)
93{
94 return gpu->revn == 330;
95}
96
97int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
98int adreno_hw_init(struct msm_gpu *gpu);
99uint32_t adreno_last_fence(struct msm_gpu *gpu);
100void adreno_recover(struct msm_gpu *gpu);
101int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
102 struct msm_file_private *ctx);
103void adreno_flush(struct msm_gpu *gpu);
104void adreno_idle(struct msm_gpu *gpu);
105#ifdef CONFIG_DEBUG_FS
106void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
107#endif
108void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
109
110int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
111 struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
112 struct adreno_rev rev);
113void adreno_gpu_cleanup(struct adreno_gpu *gpu);
114
115
116/* ringbuffer helpers (the parts that are adreno specific) */
117
118static inline void
119OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
120{
121 adreno_wait_ring(ring->gpu, cnt+1);
122 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
123}
124
125/* no-op packet: */
126static inline void
127OUT_PKT2(struct msm_ringbuffer *ring)
128{
129 adreno_wait_ring(ring->gpu, 1);
130 OUT_RING(ring, CP_TYPE2_PKT);
131}
132
133static inline void
134OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
135{
136 adreno_wait_ring(ring->gpu, cnt+1);
137 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
138}
139
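
The type-0 and type-3 headers built above store (cnt - 1) in bits [29:16]. As a worked example, the fence write in adreno_submit() uses both; the header dwords work out to (values computed from the definitions above, shown for illustration):

	/* OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1):
	 *   CP_TYPE0_PKT | ((1 - 1) << 16) | (0x057a & 0x7FFF) == 0x0000057a
	 *
	 * OUT_PKT3(ring, CP_EVENT_WRITE, 3):
	 *   CP_TYPE3_PKT | ((3 - 1) << 16) | (CP_EVENT_WRITE << 8)
	 *     == 0xc0000000 | 0x00020000 | 0x00004600 == 0xc0024600
	 * each followed by its payload dwords pushed with OUT_RING().
	 */
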
140
141#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000000..94c13f418e75
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
1#ifndef ADRENO_PM4_XML
2#define ADRENO_PM4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
16- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
17
18Copyright (C) 2013 by the following authors:
19- Rob Clark <robdclark@gmail.com> (robclark)
20
21Permission is hereby granted, free of charge, to any person obtaining
22a copy of this software and associated documentation files (the
23"Software"), to deal in the Software without restriction, including
24without limitation the rights to use, copy, modify, merge, publish,
25distribute, sublicense, and/or sell copies of the Software, and to
26permit persons to whom the Software is furnished to do so, subject to
27the following conditions:
28
29The above copyright notice and this permission notice (including the
30next paragraph) shall be included in all copies or substantial
31portions of the Software.
32
33THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
35MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
36IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
37LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
38OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
39WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/
41
42
43enum vgt_event_type {
44 VS_DEALLOC = 0,
45 PS_DEALLOC = 1,
46 VS_DONE_TS = 2,
47 PS_DONE_TS = 3,
48 CACHE_FLUSH_TS = 4,
49 CONTEXT_DONE = 5,
50 CACHE_FLUSH = 6,
51 HLSQ_FLUSH = 7,
52 VIZQUERY_START = 7,
53 VIZQUERY_END = 8,
54 SC_WAIT_WC = 9,
55 RST_PIX_CNT = 13,
56 RST_VTX_CNT = 14,
57 TILE_FLUSH = 15,
58 CACHE_FLUSH_AND_INV_TS_EVENT = 20,
59 ZPASS_DONE = 21,
60 CACHE_FLUSH_AND_INV_EVENT = 22,
61 PERFCOUNTER_START = 23,
62 PERFCOUNTER_STOP = 24,
63 VS_FETCH_DONE = 27,
64 FACENESS_FLUSH = 28,
65};
66
67enum pc_di_primtype {
68 DI_PT_NONE = 0,
69 DI_PT_POINTLIST = 1,
70 DI_PT_LINELIST = 2,
71 DI_PT_LINESTRIP = 3,
72 DI_PT_TRILIST = 4,
73 DI_PT_TRIFAN = 5,
74 DI_PT_TRISTRIP = 6,
75 DI_PT_RECTLIST = 8,
76 DI_PT_QUADLIST = 13,
77 DI_PT_QUADSTRIP = 14,
78 DI_PT_POLYGON = 15,
79 DI_PT_2D_COPY_RECT_LIST_V0 = 16,
80 DI_PT_2D_COPY_RECT_LIST_V1 = 17,
81 DI_PT_2D_COPY_RECT_LIST_V2 = 18,
82 DI_PT_2D_COPY_RECT_LIST_V3 = 19,
83 DI_PT_2D_FILL_RECT_LIST = 20,
84 DI_PT_2D_LINE_STRIP = 21,
85 DI_PT_2D_TRI_STRIP = 22,
86};
87
88enum pc_di_src_sel {
89 DI_SRC_SEL_DMA = 0,
90 DI_SRC_SEL_IMMEDIATE = 1,
91 DI_SRC_SEL_AUTO_INDEX = 2,
92 DI_SRC_SEL_RESERVED = 3,
93};
94
95enum pc_di_index_size {
96 INDEX_SIZE_IGN = 0,
97 INDEX_SIZE_16_BIT = 0,
98 INDEX_SIZE_32_BIT = 1,
99 INDEX_SIZE_8_BIT = 2,
100 INDEX_SIZE_INVALID = 0,
101};
102
103enum pc_di_vis_cull_mode {
104 IGNORE_VISIBILITY = 0,
105};
106
107enum adreno_pm4_packet_type {
108 CP_TYPE0_PKT = 0,
109 CP_TYPE1_PKT = 0x40000000,
110 CP_TYPE2_PKT = 0x80000000,
111 CP_TYPE3_PKT = 0xc0000000,
112};
113
114enum adreno_pm4_type3_packets {
115 CP_ME_INIT = 72,
116 CP_NOP = 16,
117 CP_INDIRECT_BUFFER = 63,
118 CP_INDIRECT_BUFFER_PFD = 55,
119 CP_WAIT_FOR_IDLE = 38,
120 CP_WAIT_REG_MEM = 60,
121 CP_WAIT_REG_EQ = 82,
122 CP_WAT_REG_GTE = 83,
123 CP_WAIT_UNTIL_READ = 92,
124 CP_WAIT_IB_PFD_COMPLETE = 93,
125 CP_REG_RMW = 33,
126 CP_SET_BIN_DATA = 47,
127 CP_REG_TO_MEM = 62,
128 CP_MEM_WRITE = 61,
129 CP_MEM_WRITE_CNTR = 79,
130 CP_COND_EXEC = 68,
131 CP_COND_WRITE = 69,
132 CP_EVENT_WRITE = 70,
133 CP_EVENT_WRITE_SHD = 88,
134 CP_EVENT_WRITE_CFL = 89,
135 CP_EVENT_WRITE_ZPD = 91,
136 CP_RUN_OPENCL = 49,
137 CP_DRAW_INDX = 34,
138 CP_DRAW_INDX_2 = 54,
139 CP_DRAW_INDX_BIN = 52,
140 CP_DRAW_INDX_2_BIN = 53,
141 CP_VIZ_QUERY = 35,
142 CP_SET_STATE = 37,
143 CP_SET_CONSTANT = 45,
144 CP_IM_LOAD = 39,
145 CP_IM_LOAD_IMMEDIATE = 43,
146 CP_LOAD_CONSTANT_CONTEXT = 46,
147 CP_INVALIDATE_STATE = 59,
148 CP_SET_SHADER_BASES = 74,
149 CP_SET_BIN_MASK = 80,
150 CP_SET_BIN_SELECT = 81,
151 CP_CONTEXT_UPDATE = 94,
152 CP_INTERRUPT = 64,
153 CP_IM_STORE = 44,
154 CP_SET_BIN_BASE_OFFSET = 75,
155 CP_SET_DRAW_INIT_FLAGS = 75,
156 CP_SET_PROTECTED_MODE = 95,
157 CP_LOAD_STATE = 48,
158 CP_COND_INDIRECT_BUFFER_PFE = 58,
159 CP_COND_INDIRECT_BUFFER_PFD = 50,
160 CP_INDIRECT_BUFFER_PFE = 63,
161 CP_SET_BIN = 76,
162};
163
164enum adreno_state_block {
165 SB_VERT_TEX = 0,
166 SB_VERT_MIPADDR = 1,
167 SB_FRAG_TEX = 2,
168 SB_FRAG_MIPADDR = 3,
169 SB_VERT_SHADER = 4,
170 SB_FRAG_SHADER = 6,
171};
172
173enum adreno_state_type {
174 ST_SHADER = 0,
175 ST_CONSTANTS = 1,
176};
177
178enum adreno_state_src {
179 SS_DIRECT = 0,
180 SS_INDIRECT = 4,
181};
182
183#define REG_CP_LOAD_STATE_0 0x00000000
184#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
185#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
186static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
187{
188 return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
189}
190#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
191#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
192static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
193{
194 return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
195}
196#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
197#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
198static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
199{
200 return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
201}
202#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
203#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
204static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
205{
206 return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
207}
208
209#define REG_CP_LOAD_STATE_1 0x00000001
210#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
211#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
212static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
213{
214 return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
215}
216#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
217#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
218static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
219{
220 return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
221}
222
223#define REG_CP_SET_BIN_0 0x00000000
224
225#define REG_CP_SET_BIN_1 0x00000001
226#define CP_SET_BIN_1_X1__MASK 0x0000ffff
227#define CP_SET_BIN_1_X1__SHIFT 0
228static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
229{
230 return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
231}
232#define CP_SET_BIN_1_Y1__MASK 0xffff0000
233#define CP_SET_BIN_1_Y1__SHIFT 16
234static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
235{
236 return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
237}
238
239#define REG_CP_SET_BIN_2 0x00000002
240#define CP_SET_BIN_2_X2__MASK 0x0000ffff
241#define CP_SET_BIN_2_X2__SHIFT 0
242static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
243{
244 return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
245}
246#define CP_SET_BIN_2_Y2__MASK 0xffff0000
247#define CP_SET_BIN_2_Y2__SHIFT 16
248static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
249{
250 return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
251}
252
253
254#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 000000000000..6f8396be431d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
1#ifndef DSI_XML
2#define DSI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum dsi_traffic_mode {
46 NON_BURST_SYNCH_PULSE = 0,
47 NON_BURST_SYNCH_EVENT = 1,
48 BURST_MODE = 2,
49};
50
51enum dsi_dst_format {
52 DST_FORMAT_RGB565 = 0,
53 DST_FORMAT_RGB666 = 1,
54 DST_FORMAT_RGB666_LOOSE = 2,
55 DST_FORMAT_RGB888 = 3,
56};
57
58enum dsi_rgb_swap {
59 SWAP_RGB = 0,
60 SWAP_RBG = 1,
61 SWAP_BGR = 2,
62 SWAP_BRG = 3,
63 SWAP_GRB = 4,
64 SWAP_GBR = 5,
65};
66
67enum dsi_cmd_trigger {
68 TRIGGER_NONE = 0,
69 TRIGGER_TE = 2,
70 TRIGGER_SW = 4,
71 TRIGGER_SW_SEOF = 5,
72 TRIGGER_SW_TE = 6,
73};
74
75#define DSI_IRQ_CMD_DMA_DONE 0x00000001
76#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
77#define DSI_IRQ_CMD_MDP_DONE 0x00000100
78#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
79#define DSI_IRQ_VIDEO_DONE 0x00010000
80#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
81#define DSI_IRQ_ERROR 0x01000000
82#define DSI_IRQ_MASK_ERROR 0x02000000
83#define REG_DSI_CTRL 0x00000000
84#define DSI_CTRL_ENABLE 0x00000001
85#define DSI_CTRL_VID_MODE_EN 0x00000002
86#define DSI_CTRL_CMD_MODE_EN 0x00000004
87#define DSI_CTRL_LANE0 0x00000010
88#define DSI_CTRL_LANE1 0x00000020
89#define DSI_CTRL_LANE2 0x00000040
90#define DSI_CTRL_LANE3 0x00000080
91#define DSI_CTRL_CLK_EN 0x00000100
92#define DSI_CTRL_ECC_CHECK 0x00100000
93#define DSI_CTRL_CRC_CHECK 0x01000000
94
95#define REG_DSI_STATUS0 0x00000004
96#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
97#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
98#define DSI_STATUS0_DSI_BUSY 0x00000010
99
100#define REG_DSI_FIFO_STATUS 0x00000008
101
102#define REG_DSI_VID_CFG0 0x0000000c
103#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
104#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
105static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
106{
107 return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
108}
109#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
110#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
111static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
112{
113 return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
114}
115#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
116#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
117static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
118{
119 return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
120}
121#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
122#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
123#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
124#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
125#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
126#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
127
128#define REG_DSI_VID_CFG1 0x0000001c
129#define DSI_VID_CFG1_R_SEL 0x00000010
130#define DSI_VID_CFG1_G_SEL 0x00000100
131#define DSI_VID_CFG1_B_SEL 0x00001000
132#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
133#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
134static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
135{
136 return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
137}
138#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
139#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
140static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
141{
142 return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
143}
144
145#define REG_DSI_ACTIVE_H 0x00000020
146#define DSI_ACTIVE_H_START__MASK 0x00000fff
147#define DSI_ACTIVE_H_START__SHIFT 0
148static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
149{
150 return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
151}
152#define DSI_ACTIVE_H_END__MASK 0x0fff0000
153#define DSI_ACTIVE_H_END__SHIFT 16
154static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
155{
156 return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
157}
158
159#define REG_DSI_ACTIVE_V 0x00000024
160#define DSI_ACTIVE_V_START__MASK 0x00000fff
161#define DSI_ACTIVE_V_START__SHIFT 0
162static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
163{
164 return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
165}
166#define DSI_ACTIVE_V_END__MASK 0x0fff0000
167#define DSI_ACTIVE_V_END__SHIFT 16
168static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
169{
170 return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
171}
172
173#define REG_DSI_TOTAL 0x00000028
174#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
175#define DSI_TOTAL_H_TOTAL__SHIFT 0
176static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
177{
178 return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
179}
180#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
181#define DSI_TOTAL_V_TOTAL__SHIFT 16
182static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
183{
184 return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
185}
186
187#define REG_DSI_ACTIVE_HSYNC 0x0000002c
188#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
189#define DSI_ACTIVE_HSYNC_START__SHIFT 0
190static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
191{
192 return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
193}
194#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
195#define DSI_ACTIVE_HSYNC_END__SHIFT 16
196static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
197{
198 return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
199}
200
201#define REG_DSI_ACTIVE_VSYNC 0x00000034
202#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
203#define DSI_ACTIVE_VSYNC_START__SHIFT 0
204static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
205{
206 return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
207}
208#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
209#define DSI_ACTIVE_VSYNC_END__SHIFT 16
210static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
211{
212 return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
213}
214
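The *__MASK / *__SHIFT pairs and their inline packing helpers are meant to be OR'd together when composing a register value; each helper masks after shifting, so an out-of-range field value is truncated rather than spilling into neighbouring bits. A minimal sketch of programming the timing registers above with the stock writel() accessor from <linux/io.h> (the function and variable names here are illustrative, not part of this patch):

static void dsi_program_timings_sketch(void __iomem *base,
		u32 ha_start, u32 ha_end, u32 va_start, u32 va_end,
		u32 htotal, u32 vtotal)
{
	/* each field is packed with its helper and OR'd into one 32-bit write */
	writel(DSI_ACTIVE_H_START(ha_start) | DSI_ACTIVE_H_END(ha_end),
			base + REG_DSI_ACTIVE_H);
	writel(DSI_ACTIVE_V_START(va_start) | DSI_ACTIVE_V_END(va_end),
			base + REG_DSI_ACTIVE_V);
	writel(DSI_TOTAL_H_TOTAL(htotal) | DSI_TOTAL_V_TOTAL(vtotal),
			base + REG_DSI_TOTAL);
}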
215#define REG_DSI_CMD_DMA_CTRL 0x00000038
216#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
217#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
218
219#define REG_DSI_CMD_CFG0 0x0000003c
220
221#define REG_DSI_CMD_CFG1 0x00000040
222
223#define REG_DSI_DMA_BASE 0x00000044
224
225#define REG_DSI_DMA_LEN 0x00000048
226
227#define REG_DSI_ACK_ERR_STATUS 0x00000064
228
229static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
230
231static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
232
233#define REG_DSI_TRIG_CTRL 0x00000080
234#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
235#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
236static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
237{
238 return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
239}
240#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
241#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
242static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
243{
244 return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
245}
246#define DSI_TRIG_CTRL_STREAM 0x00000100
247#define DSI_TRIG_CTRL_TE 0x80000000
248
249#define REG_DSI_TRIG_DMA 0x0000008c
250
251#define REG_DSI_DLN0_PHY_ERR 0x000000b0
252
253#define REG_DSI_TIMEOUT_STATUS 0x000000bc
254
255#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
256#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
257#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
258static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
259{
260 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
261}
262#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
263#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
264static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
265{
266 return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
267}
268
269#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
270#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
271#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
272
273#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
274
275#define REG_DSI_ERR_INT_MASK0 0x00000108
276
277#define REG_DSI_INTR_CTRL 0x0000010c
278
279#define REG_DSI_RESET 0x00000114
280
281#define REG_DSI_CLK_CTRL 0x00000118
282
283#define REG_DSI_PHY_RESET 0x00000128
284
285#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
286#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
287
288#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
289
290#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
291
292#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
293
294#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
295
296#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
297
298#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
299
300#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
301
302#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
303
304#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
305
306#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
307
308#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
309
310#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
311
312#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
313
314#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
315
316#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
317
318#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
319
320#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
321
322#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
323
324#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
325
326#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
327
328#define REG_DSI_PHY_PLL_STATUS 0x00000280
329#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
330
331#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
332
333#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
334
335#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
336
337#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
338
339#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
340
341#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
342
343#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
344
345#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
346
347#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
348
349#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
350
351#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
352
353#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
354
355#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
356
357#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
358
359#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
360
361#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
362
363#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
364
365#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
366
367#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
368
369#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
370
371#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
372
373#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
374
375#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
376
377#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
378
379#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
380
381#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
382
383#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
384
385#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
386
387#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
388
389#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
390#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
391
392static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
393
394static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
395
396static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
397
398static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
399
400static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
401
402static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
403
404static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
405
406#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
407
408#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
409
410#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
411
412#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
413
414#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
415
416#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
417
418#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
419
420#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
421
422#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
423
424#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
425
426#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
427
428#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
429
430#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
431
432#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
433
434#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
435
436#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
437
438#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
439
440#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
441
442#define REG_DSI_8960_PHY_CTRL_0 0x00000470
443
444#define REG_DSI_8960_PHY_CTRL_1 0x00000474
445
446#define REG_DSI_8960_PHY_CTRL_2 0x00000478
447
448#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
449
450#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
451
452#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
453
454#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
455
456#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
457
458#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
459
460#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
461
462#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
463
464#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
465
466#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
467
468#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
469
470#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
471
472#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
473
474#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
475
476#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
477
478#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
479
480#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
481
482#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
483
484#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
485
486#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
487
488#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
489
490#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
491
492#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
493
494#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
495
496#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
497
498#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
499#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
500
501
502#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 000000000000..aefc1b8feae9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
1#ifndef MMSS_CC_XML
2#define MMSS_CC_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mmss_cc_clk {
46 CLK = 0,
47 PCLK = 1,
48};
49
50#define REG_MMSS_CC_AHB 0x00000008
51
52static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
53{
54 switch (idx) {
55 case CLK: return 0x0000004c;
56 case PCLK: return 0x00000130;
57 default: return INVALID_IDX(idx);
58 }
59}
60static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
61
62static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
63#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
64#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
65#define MMSS_CC_CLK_CC_MND_EN 0x00000020
66#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
67#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
68static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
69{
70 return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
71}
72#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
73#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
74static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
75{
76 return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
77}
78
79static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
80#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
81#define MMSS_CC_CLK_MD_D__SHIFT 0
82static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
83{
84 return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
85}
86#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
87#define MMSS_CC_CLK_MD_M__SHIFT 8
88static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
89{
90 return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
91}
92
93static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
94#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
95#define MMSS_CC_CLK_NS_SRC__SHIFT 0
96static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
97{
98 return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
99}
100#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
101#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
102static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
103{
104 return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
105}
106#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
107#define MMSS_CC_CLK_NS_VAL__SHIFT 24
108static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
109{
110 return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
111}
112
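The __offset_CLK() switch above is how the generated header folds the two M/N/D clock counters into one register template: each REG_MMSS_CC_CLK_*() accessor adds a fixed register stride to the per-instance base. The resulting addresses follow directly from the definitions above:

	REG_MMSS_CC_CLK_CC(CLK) == 0x0000004c    REG_MMSS_CC_CLK_CC(PCLK) == 0x00000130
	REG_MMSS_CC_CLK_MD(CLK) == 0x00000050    REG_MMSS_CC_CLK_MD(PCLK) == 0x00000134
	REG_MMSS_CC_CLK_NS(CLK) == 0x00000054    REG_MMSS_CC_CLK_NS(PCLK) == 0x00000138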
113
114#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 000000000000..a225e8170b2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
1#ifndef SFPB_XML
2#define SFPB_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_SFPB_CFG 0x00000058
46
47
48#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 000000000000..12ecfb928f75
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20static struct platform_device *hdmi_pdev;
21
22void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
23{
24 uint32_t ctrl = 0;
25
26 if (power_on) {
27 ctrl |= HDMI_CTRL_ENABLE;
28 if (!hdmi->hdmi_mode) {
29 ctrl |= HDMI_CTRL_HDMI;
30 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
31 ctrl &= ~HDMI_CTRL_HDMI;
32 } else {
33 ctrl |= HDMI_CTRL_HDMI;
34 }
35 } else {
36 ctrl = HDMI_CTRL_HDMI;
37 }
38
39 hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
40 DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
41 power_on ? "Enable" : "Disable", ctrl);
42}
43
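hdmi_set_mode() is the single switch between off, DVI and HDMI operation (for DVI the controller is first enabled with the HDMI bit set, and the bit is then cleared by the final write). Callers in this patch bracket PHY operations with it; a minimal sketch of the expected ordering, mirroring hpd_enable() and the connector dpms hook in hdmi_connector.c (the wrapper function name is illustrative only):

static void hdmi_power_cycle_sketch(struct hdmi *hdmi)
{
	struct hdmi_phy *phy = hdmi->phy;

	/* bring-up: controller off, reset the PHY, then enable */
	hdmi_set_mode(hdmi, false);
	phy->funcs->reset(phy);
	hdmi_set_mode(hdmi, true);

	/* power-down: disable the controller before touching the PHY */
	hdmi_set_mode(hdmi, false);
	phy->funcs->powerdown(phy);
}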
44static irqreturn_t hdmi_irq(int irq, void *dev_id)
45{
46 struct hdmi *hdmi = dev_id;
47
48 /* Process HPD: */
49 hdmi_connector_irq(hdmi->connector);
50
51 /* Process DDC: */
52 hdmi_i2c_irq(hdmi->i2c);
53
54 /* TODO audio.. */
55
56 return IRQ_HANDLED;
57}
58
59void hdmi_destroy(struct hdmi *hdmi)
60{
61 struct hdmi_phy *phy = hdmi->phy;
62
63 if (phy)
64 phy->funcs->destroy(phy);
65
66 if (hdmi->i2c)
67 hdmi_i2c_destroy(hdmi->i2c);
68
69 put_device(&hdmi->pdev->dev);
70}
71
72/* initialize connector */
73int hdmi_init(struct hdmi *hdmi, struct drm_device *dev,
74 struct drm_connector *connector)
75{
76 struct platform_device *pdev = hdmi_pdev;
77 struct hdmi_platform_config *config;
78 int ret;
79
80 if (!pdev) {
81 dev_err(dev->dev, "no hdmi device\n");
82 ret = -ENXIO;
83 goto fail;
84 }
85
86 config = pdev->dev.platform_data;
87
88 get_device(&pdev->dev);
89
90 hdmi->dev = dev;
91 hdmi->pdev = pdev;
92 hdmi->connector = connector;
93
94	/* not sure which phy maps to which msm SoC.. probably some are missing */
95 if (config->phy_init)
96 hdmi->phy = config->phy_init(hdmi);
97 else
98 hdmi->phy = ERR_PTR(-ENXIO);
99
100 if (IS_ERR(hdmi->phy)) {
101 ret = PTR_ERR(hdmi->phy);
102 dev_err(dev->dev, "failed to load phy: %d\n", ret);
103 hdmi->phy = NULL;
104 goto fail;
105 }
106
107 hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
108 if (IS_ERR(hdmi->mmio)) {
109 ret = PTR_ERR(hdmi->mmio);
110 goto fail;
111 }
112
113 hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
114 if (IS_ERR(hdmi->mvs))
115 hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
116 if (IS_ERR(hdmi->mvs)) {
117 ret = PTR_ERR(hdmi->mvs);
118 dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
119 goto fail;
120 }
121
122 hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
123 if (IS_ERR(hdmi->mpp0))
124 hdmi->mpp0 = NULL;
125
126 hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
127 if (IS_ERR(hdmi->clk)) {
128 ret = PTR_ERR(hdmi->clk);
129 dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
130 goto fail;
131 }
132
133 hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
134 if (IS_ERR(hdmi->m_pclk)) {
135 ret = PTR_ERR(hdmi->m_pclk);
136 dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
137 goto fail;
138 }
139
140 hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
141 if (IS_ERR(hdmi->s_pclk)) {
142 ret = PTR_ERR(hdmi->s_pclk);
143 dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
144 goto fail;
145 }
146
147 hdmi->i2c = hdmi_i2c_init(hdmi);
148 if (IS_ERR(hdmi->i2c)) {
149 ret = PTR_ERR(hdmi->i2c);
150 dev_err(dev->dev, "failed to get i2c: %d\n", ret);
151 hdmi->i2c = NULL;
152 goto fail;
153 }
154
155 hdmi->irq = platform_get_irq(pdev, 0);
156 if (hdmi->irq < 0) {
157 ret = hdmi->irq;
158 dev_err(dev->dev, "failed to get irq: %d\n", ret);
159 goto fail;
160 }
161
162 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
163 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
164 "hdmi_isr", hdmi);
165 if (ret < 0) {
166 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
167 hdmi->irq, ret);
168 goto fail;
169 }
170
171 return 0;
172
173fail:
174 if (hdmi)
175 hdmi_destroy(hdmi);
176
177 return ret;
178}
179
180/*
181 * The hdmi device:
182 */
183
184static int hdmi_dev_probe(struct platform_device *pdev)
185{
186 static struct hdmi_platform_config config = {};
187#ifdef CONFIG_OF
188 /* TODO */
189#else
190 if (cpu_is_apq8064()) {
191 config.phy_init = hdmi_phy_8960_init;
192 config.ddc_clk_gpio = 70;
193 config.ddc_data_gpio = 71;
194 config.hpd_gpio = 72;
195 config.pmic_gpio = 13 + NR_GPIO_IRQS;
196 } else if (cpu_is_msm8960()) {
197 config.phy_init = hdmi_phy_8960_init;
198 config.ddc_clk_gpio = 100;
199 config.ddc_data_gpio = 101;
200 config.hpd_gpio = 102;
201 config.pmic_gpio = -1;
202 } else if (cpu_is_msm8x60()) {
203 config.phy_init = hdmi_phy_8x60_init;
204 config.ddc_clk_gpio = 170;
205 config.ddc_data_gpio = 171;
206 config.hpd_gpio = 172;
207 config.pmic_gpio = -1;
208 }
209#endif
210 pdev->dev.platform_data = &config;
211 hdmi_pdev = pdev;
212 return 0;
213}
214
215static int hdmi_dev_remove(struct platform_device *pdev)
216{
217 hdmi_pdev = NULL;
218 return 0;
219}
220
221static struct platform_driver hdmi_driver = {
222 .probe = hdmi_dev_probe,
223 .remove = hdmi_dev_remove,
224 .driver.name = "hdmi_msm",
225};
226
227void __init hdmi_register(void)
228{
229 platform_driver_register(&hdmi_driver);
230}
231
232void __exit hdmi_unregister(void)
233{
234 platform_driver_unregister(&hdmi_driver);
235}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 000000000000..34703fea22ca
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __HDMI_CONNECTOR_H__
19#define __HDMI_CONNECTOR_H__
20
21#include <linux/i2c.h>
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25
26#include "msm_drv.h"
27#include "hdmi.xml.h"
28
29
30struct hdmi_phy;
31
32struct hdmi {
33 struct drm_device *dev;
34 struct platform_device *pdev;
35
36 void __iomem *mmio;
37
38 struct regulator *mvs; /* HDMI_5V */
39 struct regulator *mpp0; /* External 5V */
40
41 struct clk *clk;
42 struct clk *m_pclk;
43 struct clk *s_pclk;
44
45 struct hdmi_phy *phy;
46 struct i2c_adapter *i2c;
47 struct drm_connector *connector;
48
49 bool hdmi_mode; /* are we in hdmi mode? */
50
51 int irq;
52};
53
54/* platform config data (i.e. from DT, or pdata) */
55struct hdmi_platform_config {
56 struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
57 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
58};
59
60void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
61void hdmi_destroy(struct hdmi *hdmi);
62int hdmi_init(struct hdmi *hdmi, struct drm_device *dev,
63 struct drm_connector *connector);
64
65static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
66{
67 msm_writel(data, hdmi->mmio + reg);
68}
69
70static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
71{
72 return msm_readl(hdmi->mmio + reg);
73}
74
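hdmi_write()/hdmi_read() are thin MMIO wrappers over the register map in hdmi.xml.h; register fields are updated with explicit read-modify-write sequences built from the *__MASK macros and packing helpers. A minimal sketch (the helper name is illustrative, not part of this patch):

static inline void hdmi_set_hpd_timeout(struct hdmi *hdmi, u32 ticks)
{
	u32 hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);

	/* replace only the debounce timeout field, keep the other bits */
	hpd_ctrl &= ~HDMI_HPD_CTRL_TIMEOUT__MASK;
	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(ticks);
	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, hpd_ctrl);
}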
75/*
76 * The phy appears to be different, for example between 8960 and 8x60,
77 * so split the phy related functions out and load the correct one at
78 * runtime:
79 */
80
81struct hdmi_phy_funcs {
82 void (*destroy)(struct hdmi_phy *phy);
83 void (*reset)(struct hdmi_phy *phy);
84 void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
85 void (*powerdown)(struct hdmi_phy *phy);
86};
87
88struct hdmi_phy {
89 const struct hdmi_phy_funcs *funcs;
90};
91
92/*
93 * phy can be different on different generations:
94 */
95struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
96struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
97
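A generation-specific phy embeds struct hdmi_phy, fills in a funcs table and returns the base pointer from its init function; the rest of the driver only calls it through phy->funcs. An illustrative skeleton (names are made up, this is not the actual 8960/8x60 implementation; assumes <linux/slab.h> and <linux/err.h>):

struct example_phy {
	struct hdmi_phy base;
	struct hdmi *hdmi;
};
#define to_example_phy(x) container_of(x, struct example_phy, base)

static void example_phy_destroy(struct hdmi_phy *phy)
{
	kfree(to_example_phy(phy));
}

static void example_phy_reset(struct hdmi_phy *phy)
{
	/* generation specific: toggle the HDMI_PHY_CTRL_SW_RESET* bits */
}

static void example_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock)
{
	/* generation specific: program PLL/driver registers for pixclock */
}

static void example_phy_powerdown(struct hdmi_phy *phy)
{
	/* generation specific: power down PLL and output drivers */
}

static const struct hdmi_phy_funcs example_phy_funcs = {
	.destroy = example_phy_destroy,
	.reset = example_phy_reset,
	.powerup = example_phy_powerup,
	.powerdown = example_phy_powerdown,
};

struct hdmi_phy *hdmi_phy_example_init(struct hdmi *hdmi)
{
	struct example_phy *phy = kzalloc(sizeof(*phy), GFP_KERNEL);

	if (!phy)
		return ERR_PTR(-ENOMEM);

	phy->base.funcs = &example_phy_funcs;
	phy->hdmi = hdmi;

	return &phy->base;
}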
98/*
99 * hdmi connector:
100 */
101
102void hdmi_connector_irq(struct drm_connector *connector);
103
104/*
105 * i2c adapter for ddc:
106 */
107
108void hdmi_i2c_irq(struct i2c_adapter *i2c);
109void hdmi_i2c_destroy(struct i2c_adapter *i2c);
110struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
111
112#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 000000000000..f5fa4865e059
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
1#ifndef HDMI_XML
2#define HDMI_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum hdmi_hdcp_key_state {
46 NO_KEYS = 0,
47 NOT_CHECKED = 1,
48 CHECKING = 2,
49 KEYS_VALID = 3,
50 AKSV_INVALID = 4,
51 CHECKSUM_MISMATCH = 5,
52};
53
54enum hdmi_ddc_read_write {
55 DDC_WRITE = 0,
56 DDC_READ = 1,
57};
58
59enum hdmi_acr_cts {
60 ACR_NONE = 0,
61 ACR_32 = 1,
62 ACR_44 = 2,
63 ACR_48 = 3,
64};
65
66#define REG_HDMI_CTRL 0x00000000
67#define HDMI_CTRL_ENABLE 0x00000001
68#define HDMI_CTRL_HDMI 0x00000002
69#define HDMI_CTRL_ENCRYPTED 0x00000004
70
71#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
72#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
73
74#define REG_HDMI_ACR_PKT_CTRL 0x00000024
75#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
76#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
77#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
78#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
79static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
80{
81 return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
82}
83#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
84#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
85#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
86static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
87{
88 return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
89}
90#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
91
92#define REG_HDMI_VBI_PKT_CTRL 0x00000028
93#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
94#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
95#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
96#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
97#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
98#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
99
100#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
101#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
102#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
103#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
104#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
105#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
106#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
107
108#define REG_HDMI_GEN_PKT_CTRL 0x00000034
109#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
110#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
111#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
112#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
113static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
114{
115 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
116}
117#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
118#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
119#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
120#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
121static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
122{
123 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
124}
125#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
126#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
127static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
128{
129 return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
130}
131
132#define REG_HDMI_GC 0x00000040
133#define HDMI_GC_MUTE 0x00000001
134
135#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
136#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
137#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
138
139static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
140
141#define REG_HDMI_GENERIC0_HDR 0x00000084
142
143static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
144
145#define REG_HDMI_GENERIC1_HDR 0x000000a4
146
147static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
148
149static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
150
151static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
152#define HDMI_ACR_0_CTS__MASK 0xfffff000
153#define HDMI_ACR_0_CTS__SHIFT 12
154static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
155{
156 return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
157}
158
159static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
160#define HDMI_ACR_1_N__MASK 0xffffffff
161#define HDMI_ACR_1_N__SHIFT 0
162static inline uint32_t HDMI_ACR_1_N(uint32_t val)
163{
164 return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
165}
166
167#define REG_HDMI_AUDIO_INFO0 0x000000e4
168#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
169#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
170static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
171{
172 return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
173}
174#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
175#define HDMI_AUDIO_INFO0_CC__SHIFT 8
176static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
177{
178 return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
179}
180
181#define REG_HDMI_AUDIO_INFO1 0x000000e8
182#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
183#define HDMI_AUDIO_INFO1_CA__SHIFT 0
184static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
185{
186 return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
187}
188#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
189#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
190static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
191{
192 return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
193}
194#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
195
196#define REG_HDMI_HDCP_CTRL 0x00000110
197#define HDMI_HDCP_CTRL_ENABLE 0x00000001
198#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
199
200#define REG_HDMI_HDCP_INT_CTRL 0x00000118
201
202#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
203#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
204#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
205#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
206#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
207static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
208{
209 return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
210}
211
212#define REG_HDMI_HDCP_RESET 0x00000130
213#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
214
215#define REG_HDMI_AUDIO_CFG 0x000001d0
216#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
217#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
218#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
219static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
220{
221 return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
222}
223
224#define REG_HDMI_USEC_REFTIMER 0x00000208
225
226#define REG_HDMI_DDC_CTRL 0x0000020c
227#define HDMI_DDC_CTRL_GO 0x00000001
228#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
229#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
230#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
231#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
232#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
233static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
234{
235 return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
236}
237
238#define REG_HDMI_DDC_INT_CTRL 0x00000214
239#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
240#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
241#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
242
243#define REG_HDMI_DDC_SW_STATUS 0x00000218
244#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
245#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
246#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
247#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
248
249#define REG_HDMI_DDC_HW_STATUS 0x0000021c
250
251#define REG_HDMI_DDC_SPEED 0x00000220
252#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
253#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
254static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
255{
256 return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
257}
258#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
259#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
260static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
261{
262 return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
263}
264
265#define REG_HDMI_DDC_SETUP 0x00000224
266#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
267#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
268static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
269{
270 return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
271}
272
273static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
274
275static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
276#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
277#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
278static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
279{
280 return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
281}
282#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
283#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
284#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
285#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
286#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
287static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
288{
289 return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
290}
291
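The per-slot transaction registers above form a small descriptor list for the DDC engine: each REG_HDMI_I2C_TRANSACTION_REG(i) encodes direction, byte count and start/stop behaviour for one I2C segment. An illustrative composition for a plain 128-byte EDID block read (the helper name is made up; the staging of data bytes and the HDMI_DDC_CTRL_GO kick live in hdmi_i2c.c, not shown here):

static void hdmi_ddc_edid_block_sketch(struct hdmi *hdmi)
{
	/* descriptor 0: write one byte (the EDID offset), stop on NACK */
	hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION_REG(0),
			HDMI_I2C_TRANSACTION_REG_RW(DDC_WRITE) |
			HDMI_I2C_TRANSACTION_REG_START |
			HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK |
			HDMI_I2C_TRANSACTION_REG_CNT(1));

	/* descriptor 1: repeated start, read 128 bytes back, then stop */
	hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION_REG(1),
			HDMI_I2C_TRANSACTION_REG_RW(DDC_READ) |
			HDMI_I2C_TRANSACTION_REG_START |
			HDMI_I2C_TRANSACTION_REG_STOP |
			HDMI_I2C_TRANSACTION_REG_CNT(128));
}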
292#define REG_HDMI_DDC_DATA 0x00000238
293#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
294#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
295static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
296{
297 return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
298}
299#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
300#define HDMI_DDC_DATA_DATA__SHIFT 8
301static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
302{
303 return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
304}
305#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
306#define HDMI_DDC_DATA_INDEX__SHIFT 16
307static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
308{
309 return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
310}
311#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
312
313#define REG_HDMI_HPD_INT_STATUS 0x00000250
314#define HDMI_HPD_INT_STATUS_INT 0x00000001
315#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
316
317#define REG_HDMI_HPD_INT_CTRL 0x00000254
318#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
319#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
320#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
321#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
322#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
323#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
324
325#define REG_HDMI_HPD_CTRL 0x00000258
326#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
327#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
328static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
329{
330 return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
331}
332#define HDMI_HPD_CTRL_ENABLE 0x10000000
333
334#define REG_HDMI_DDC_REF 0x0000027c
335#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
336#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
337#define HDMI_DDC_REF_REFTIMER__SHIFT 0
338static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
339{
340 return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
341}
342
343#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
344#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
345#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
346static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
347{
348 return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
349}
350#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
351#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
352static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
353{
354 return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
355}
356
357#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
358#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
359#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
360static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
361{
362 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
363}
364#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
365#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
366static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
367{
368 return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
369}
370
371#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
372#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
373#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
374static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
375{
376 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
377}
378#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
379#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
380static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
381{
382 return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
383}
384
385#define REG_HDMI_TOTAL 0x000002c0
386#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
387#define HDMI_TOTAL_H_TOTAL__SHIFT 0
388static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
389{
390 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
391}
392#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
393#define HDMI_TOTAL_V_TOTAL__SHIFT 16
394static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
395{
396 return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
397}
398
399#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
400#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
401#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
402static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
403{
404 return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
405}
406
407#define REG_HDMI_FRAME_CTRL 0x000002c8
408#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
409#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
410#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
411#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
412
413#define REG_HDMI_PHY_CTRL 0x000002d4
414#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
415#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
416#define HDMI_PHY_CTRL_SW_RESET 0x00000004
417#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
418
419#define REG_HDMI_AUD_INT 0x000002cc
420#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
421#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
422#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
423#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
424
425#define REG_HDMI_8x60_PHY_REG0 0x00000300
426#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
427#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
428static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
429{
430 return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
431}
432
433#define REG_HDMI_8x60_PHY_REG1 0x00000304
434#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
435#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
436static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
437{
438 return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
439}
440#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
441#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
442static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
443{
444 return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
445}
446
447#define REG_HDMI_8x60_PHY_REG2 0x00000308
448#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
449#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
450#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
451#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
452#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
453#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
454#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
455#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
456
457#define REG_HDMI_8x60_PHY_REG3 0x0000030c
458#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
459
460#define REG_HDMI_8x60_PHY_REG4 0x00000310
461
462#define REG_HDMI_8x60_PHY_REG5 0x00000314
463
464#define REG_HDMI_8x60_PHY_REG6 0x00000318
465
466#define REG_HDMI_8x60_PHY_REG7 0x0000031c
467
468#define REG_HDMI_8x60_PHY_REG8 0x00000320
469
470#define REG_HDMI_8x60_PHY_REG9 0x00000324
471
472#define REG_HDMI_8x60_PHY_REG10 0x00000328
473
474#define REG_HDMI_8x60_PHY_REG11 0x0000032c
475
476#define REG_HDMI_8x60_PHY_REG12 0x00000330
477#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
478#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
479#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
480
481#define REG_HDMI_8960_PHY_REG0 0x00000400
482
483#define REG_HDMI_8960_PHY_REG1 0x00000404
484
485#define REG_HDMI_8960_PHY_REG2 0x00000408
486
487#define REG_HDMI_8960_PHY_REG3 0x0000040c
488
489#define REG_HDMI_8960_PHY_REG4 0x00000410
490
491#define REG_HDMI_8960_PHY_REG5 0x00000414
492
493#define REG_HDMI_8960_PHY_REG6 0x00000418
494
495#define REG_HDMI_8960_PHY_REG7 0x0000041c
496
497#define REG_HDMI_8960_PHY_REG8 0x00000420
498
499#define REG_HDMI_8960_PHY_REG9 0x00000424
500
501#define REG_HDMI_8960_PHY_REG10 0x00000428
502
503#define REG_HDMI_8960_PHY_REG11 0x0000042c
504
505#define REG_HDMI_8960_PHY_REG12 0x00000430
506
507
508#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 000000000000..7d63f5ffa7ba
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,461 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/gpio.h>
19
20#include "msm_connector.h"
21#include "hdmi.h"
22
23struct hdmi_connector {
24 struct msm_connector base;
25 struct hdmi hdmi;
26 unsigned long int pixclock;
27 bool enabled;
28};
29#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
30
31static int gpio_config(struct hdmi *hdmi, bool on)
32{
33 struct drm_device *dev = hdmi->dev;
34 struct hdmi_platform_config *config =
35 hdmi->pdev->dev.platform_data;
36 int ret;
37
38 if (on) {
39 ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
40 if (ret) {
41 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
42 "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
43 goto error1;
44 }
45 ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
46 if (ret) {
47 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
48 "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
49 goto error2;
50 }
51 ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
52 if (ret) {
53 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
54 "HDMI_HPD", config->hpd_gpio, ret);
55 goto error3;
56 }
57 if (config->pmic_gpio != -1) {
58 ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
59 if (ret) {
60 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
61 "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
62 goto error4;
63 }
64 gpio_set_value_cansleep(config->pmic_gpio, 0);
65 }
66 DBG("gpio on");
67 } else {
68 gpio_free(config->ddc_clk_gpio);
69 gpio_free(config->ddc_data_gpio);
70 gpio_free(config->hpd_gpio);
71
72 if (config->pmic_gpio != -1) {
73 gpio_set_value_cansleep(config->pmic_gpio, 1);
74 gpio_free(config->pmic_gpio);
75 }
76 DBG("gpio off");
77 }
78
79 return 0;
80
81error4:
82 gpio_free(config->hpd_gpio);
83error3:
84 gpio_free(config->ddc_data_gpio);
85error2:
86 gpio_free(config->ddc_clk_gpio);
87error1:
88 return ret;
89}
90
91static int hpd_enable(struct hdmi_connector *hdmi_connector)
92{
93 struct hdmi *hdmi = &hdmi_connector->hdmi;
94 struct drm_device *dev = hdmi_connector->base.base.dev;
95 struct hdmi_phy *phy = hdmi->phy;
96 uint32_t hpd_ctrl;
97 int ret;
98
99 ret = gpio_config(hdmi, true);
100 if (ret) {
101 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
102 goto fail;
103 }
104
105 ret = clk_prepare_enable(hdmi->clk);
106 if (ret) {
107 dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
108 goto fail;
109 }
110
111 ret = clk_prepare_enable(hdmi->m_pclk);
112 if (ret) {
113 dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
114 goto fail;
115 }
116
117 ret = clk_prepare_enable(hdmi->s_pclk);
118 if (ret) {
119 dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
120 goto fail;
121 }
122
123 if (hdmi->mpp0)
124 ret = regulator_enable(hdmi->mpp0);
125 if (!ret)
126 ret = regulator_enable(hdmi->mvs);
127 if (ret) {
128 dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
129 goto fail;
130 }
131
132 hdmi_set_mode(hdmi, false);
133 phy->funcs->reset(phy);
134 hdmi_set_mode(hdmi, true);
135
136 hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
137
138 /* enable HPD events: */
139 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
140 HDMI_HPD_INT_CTRL_INT_CONNECT |
141 HDMI_HPD_INT_CTRL_INT_EN);
142
143 /* set timeout to 4.1ms (max) for hardware debounce */
144 hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
145 hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
146
147 /* Toggle HPD circuit to trigger HPD sense */
148 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
149 ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
150 hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
151 HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
152
153 return 0;
154
155fail:
156 return ret;
157}
158
159static int hdp_disable(struct hdmi_connector *hdmi_connector)
160{
161 struct hdmi *hdmi = &hdmi_connector->hdmi;
162 struct drm_device *dev = hdmi_connector->base.base.dev;
163 int ret = 0;
164
165 /* Disable HPD interrupt */
166 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
167
168 hdmi_set_mode(hdmi, false);
169
170 if (hdmi->mpp0)
171 ret = regulator_disable(hdmi->mpp0);
172 if (!ret)
173 ret = regulator_disable(hdmi->mvs);
174 if (ret) {
175		dev_err(dev->dev, "failed to disable regulators: %d\n", ret);
176 goto fail;
177 }
178
179 clk_disable_unprepare(hdmi->clk);
180 clk_disable_unprepare(hdmi->m_pclk);
181 clk_disable_unprepare(hdmi->s_pclk);
182
183 ret = gpio_config(hdmi, false);
184 if (ret) {
185 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
186 goto fail;
187 }
188
189 return 0;
190
191fail:
192 return ret;
193}
194
195void hdmi_connector_irq(struct drm_connector *connector)
196{
197 struct msm_connector *msm_connector = to_msm_connector(connector);
198 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
199 struct hdmi *hdmi = &hdmi_connector->hdmi;
200 uint32_t hpd_int_status, hpd_int_ctrl;
201
202 /* Process HPD: */
203 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
204 hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
205
206 if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
207 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
208 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
209
210 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
211
212 /* ack the irq: */
213 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
214 hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
215
216 drm_helper_hpd_irq_event(connector->dev);
217
218		/* detect disconnect if we are connected or vice versa: */
219 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
220 if (!detected)
221 hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
222 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
223 }
224}
225
226static enum drm_connector_status hdmi_connector_detect(
227 struct drm_connector *connector, bool force)
228{
229 struct msm_connector *msm_connector = to_msm_connector(connector);
230 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
231 struct hdmi *hdmi = &hdmi_connector->hdmi;
232 uint32_t hpd_int_status;
233 int retry = 20;
234
235 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
236
237	/* sense seems in some cases to be momentarily de-asserted, don't
238 * let that trick us into thinking the monitor is gone:
239 */
240 while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
241 mdelay(10);
242 hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
243 DBG("status=%08x", hpd_int_status);
244 }
245
246 return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
247 connector_status_connected : connector_status_disconnected;
248}
249
250static void hdmi_connector_destroy(struct drm_connector *connector)
251{
252 struct msm_connector *msm_connector = to_msm_connector(connector);
253 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
254
255 hdp_disable(hdmi_connector);
256
257 drm_sysfs_connector_remove(connector);
258 drm_connector_cleanup(connector);
259
260 hdmi_destroy(&hdmi_connector->hdmi);
261
262 kfree(hdmi_connector);
263}
264
265static int hdmi_connector_get_modes(struct drm_connector *connector)
266{
267 struct msm_connector *msm_connector = to_msm_connector(connector);
268 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
269 struct hdmi *hdmi = &hdmi_connector->hdmi;
270 struct edid *edid;
271 uint32_t hdmi_ctrl;
272 int ret = 0;
273
274 hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
275 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
276
277 edid = drm_get_edid(connector, hdmi->i2c);
278
279 hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
280
281 drm_mode_connector_update_edid_property(connector, edid);
282
283 if (edid) {
284 ret = drm_add_edid_modes(connector, edid);
285 kfree(edid);
286 }
287
288 return ret;
289}
290
291static int hdmi_connector_mode_valid(struct drm_connector *connector,
292 struct drm_display_mode *mode)
293{
294 struct msm_connector *msm_connector = to_msm_connector(connector);
295 struct msm_drm_private *priv = connector->dev->dev_private;
296 struct msm_kms *kms = priv->kms;
297 long actual, requested;
298
299 requested = 1000 * mode->clock;
300 actual = kms->funcs->round_pixclk(kms,
301 requested, msm_connector->encoder);
302
303 DBG("requested=%ld, actual=%ld", requested, actual);
304
305 if (actual != requested)
306 return MODE_CLOCK_RANGE;
307
308 return 0;
309}
310
311static const struct drm_connector_funcs hdmi_connector_funcs = {
312 .dpms = drm_helper_connector_dpms,
313 .detect = hdmi_connector_detect,
314 .fill_modes = drm_helper_probe_single_connector_modes,
315 .destroy = hdmi_connector_destroy,
316};
317
318static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
319 .get_modes = hdmi_connector_get_modes,
320 .mode_valid = hdmi_connector_mode_valid,
321 .best_encoder = msm_connector_attached_encoder,
322};
323
324static void hdmi_connector_dpms(struct msm_connector *msm_connector, int mode)
325{
326 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
327 struct hdmi *hdmi = &hdmi_connector->hdmi;
328 struct hdmi_phy *phy = hdmi->phy;
329 bool enabled = (mode == DRM_MODE_DPMS_ON);
330
331 DBG("mode=%d", mode);
332
333 if (enabled == hdmi_connector->enabled)
334 return;
335
336 if (enabled) {
337 phy->funcs->powerup(phy, hdmi_connector->pixclock);
338 hdmi_set_mode(hdmi, true);
339 } else {
340 hdmi_set_mode(hdmi, false);
341 phy->funcs->powerdown(phy);
342 }
343
344 hdmi_connector->enabled = enabled;
345}
346
347static void hdmi_connector_mode_set(struct msm_connector *msm_connector,
348 struct drm_display_mode *mode)
349{
350 struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
351 struct hdmi *hdmi = &hdmi_connector->hdmi;
352 int hstart, hend, vstart, vend;
353 uint32_t frame_ctrl;
354
355 hdmi_connector->pixclock = mode->clock * 1000;
356
357 hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
358
359 hstart = mode->htotal - mode->hsync_start;
360 hend = mode->htotal - mode->hsync_start + mode->hdisplay;
361
362 vstart = mode->vtotal - mode->vsync_start - 1;
363 vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
364
365 DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
366 mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
367
368 hdmi_write(hdmi, REG_HDMI_TOTAL,
369 HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
370 HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
371
372 hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
373 HDMI_ACTIVE_HSYNC_START(hstart) |
374 HDMI_ACTIVE_HSYNC_END(hend));
375 hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
376 HDMI_ACTIVE_VSYNC_START(vstart) |
377 HDMI_ACTIVE_VSYNC_END(vend));
378
379 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
380 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
381 HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
382 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
383 HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
384 HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
385 } else {
386 hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
387 HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
388 hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
389 HDMI_VSYNC_ACTIVE_F2_START(0) |
390 HDMI_VSYNC_ACTIVE_F2_END(0));
391 }
392
393 frame_ctrl = 0;
394 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
395 frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
396 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
397 frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
398 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
399 frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
400 DBG("frame_ctrl=%08x", frame_ctrl);
401 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
402
403	/* TODO: until we have audio, this might be safest: */
404 if (hdmi->hdmi_mode)
405 hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
406}
407
408static const struct msm_connector_funcs msm_connector_funcs = {
409 .dpms = hdmi_connector_dpms,
410 .mode_set = hdmi_connector_mode_set,
411};
412
413/* initialize connector */
414struct drm_connector *hdmi_connector_init(struct drm_device *dev,
415 struct drm_encoder *encoder)
416{
417 struct drm_connector *connector = NULL;
418 struct hdmi_connector *hdmi_connector;
419 int ret;
420
421 hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
422 if (!hdmi_connector) {
423 ret = -ENOMEM;
424 goto fail;
425 }
426
427 connector = &hdmi_connector->base.base;
428
429 msm_connector_init(&hdmi_connector->base,
430 &msm_connector_funcs, encoder);
431 drm_connector_init(dev, connector, &hdmi_connector_funcs,
432 DRM_MODE_CONNECTOR_HDMIA);
433 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
434
435 connector->polled = DRM_CONNECTOR_POLL_HPD;
436
437 connector->interlace_allowed = 1;
438 connector->doublescan_allowed = 0;
439
440 drm_sysfs_connector_add(connector);
441
442 ret = hdmi_init(&hdmi_connector->hdmi, dev, connector);
443 if (ret)
444 goto fail;
445
446 ret = hpd_enable(hdmi_connector);
447 if (ret) {
448 dev_err(dev->dev, "failed to enable HPD: %d\n", ret);
449 goto fail;
450 }
451
452 drm_mode_connector_attach_encoder(connector, encoder);
453
454 return connector;
455
456fail:
457 if (connector)
458 hdmi_connector_destroy(connector);
459
460 return ERR_PTR(ret);
461}
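For reference, the hstart/hend/vstart/vend arithmetic in hdmi_connector_mode_set() above can be checked against a common mode. A worked sketch (illustration only, not part of the driver), assuming standard CEA-861 1080p60 timings:

	/* 1920x1080@60: hdisplay=1920, hsync_start=2008, htotal=2200,
	 *               vdisplay=1080, vsync_start=1084, vtotal=1125
	 */
	int hstart = 2200 - 2008;            /* 192  = hsync width + back porch */
	int hend   = 2200 - 2008 + 1920;     /* 2112 = hstart + active width */
	int vstart = 1125 - 1084 - 1;        /* 40 */
	int vend   = 1125 - 1084 + 1080 - 1; /* 1120 */

so REG_HDMI_ACTIVE_HSYNC/REG_HDMI_ACTIVE_VSYNC place the active region relative to the start of the sync pulse, within the total frame programmed into REG_HDMI_TOTAL.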
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 000000000000..f4ab7f70fed1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_i2c_adapter {
21 struct i2c_adapter base;
22 struct hdmi *hdmi;
23 bool sw_done;
24 wait_queue_head_t ddc_event;
25};
26#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
27
28static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
29{
30 struct hdmi *hdmi = hdmi_i2c->hdmi;
31
32 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
33 HDMI_DDC_CTRL_SW_STATUS_RESET);
34 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
35 HDMI_DDC_CTRL_SOFT_RESET);
36
37 hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
38 HDMI_DDC_SPEED_THRESHOLD(2) |
39 HDMI_DDC_SPEED_PRESCALE(10));
40
41 hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
42 HDMI_DDC_SETUP_TIMEOUT(0xff));
43
44 /* enable reference timer for 27us */
45 hdmi_write(hdmi, REG_HDMI_DDC_REF,
46 HDMI_DDC_REF_REFTIMER_ENABLE |
47 HDMI_DDC_REF_REFTIMER(27));
48}
49
50static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
51{
52 struct hdmi *hdmi = hdmi_i2c->hdmi;
53 struct drm_device *dev = hdmi->dev;
54 uint32_t retry = 0xffff;
55 uint32_t ddc_int_ctrl;
56
57 do {
58 --retry;
59
60 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
61 HDMI_DDC_INT_CTRL_SW_DONE_ACK |
62 HDMI_DDC_INT_CTRL_SW_DONE_MASK);
63
64 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
65
66 } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
67
68 if (!retry) {
69 dev_err(dev->dev, "timeout waiting for DDC\n");
70 return -ETIMEDOUT;
71 }
72
73 hdmi_i2c->sw_done = false;
74
75 return 0;
76}
77
78#define MAX_TRANSACTIONS 4
79
80static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
81{
82 struct hdmi *hdmi = hdmi_i2c->hdmi;
83
84 if (!hdmi_i2c->sw_done) {
85 uint32_t ddc_int_ctrl;
86
87 ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
88
89 if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
90 (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
91 hdmi_i2c->sw_done = true;
92 hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
93 HDMI_DDC_INT_CTRL_SW_DONE_ACK);
94 }
95 }
96
97 return hdmi_i2c->sw_done;
98}
99
100static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
101 struct i2c_msg *msgs, int num)
102{
103 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
104 struct hdmi *hdmi = hdmi_i2c->hdmi;
105 struct drm_device *dev = hdmi->dev;
106 static const uint32_t nack[] = {
107 HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
108 HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
109 };
110 int indices[MAX_TRANSACTIONS];
111 int ret, i, j, index = 0;
112 uint32_t ddc_status, ddc_data, i2c_trans;
113
114 num = min(num, MAX_TRANSACTIONS);
115
116 WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
117
118 if (num == 0)
119 return num;
120
121 init_ddc(hdmi_i2c);
122
123 ret = ddc_clear_irq(hdmi_i2c);
124 if (ret)
125 return ret;
126
127 for (i = 0; i < num; i++) {
128 struct i2c_msg *p = &msgs[i];
129 uint32_t raw_addr = p->addr << 1;
130
131 if (p->flags & I2C_M_RD)
132 raw_addr |= 1;
133
134 ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
135 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
136
137 if (i == 0) {
138 ddc_data |= HDMI_DDC_DATA_INDEX(0) |
139 HDMI_DDC_DATA_INDEX_WRITE;
140 }
141
142 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
143 index++;
144
145 indices[i] = index;
146
147 if (p->flags & I2C_M_RD) {
148 index += p->len;
149 } else {
150 for (j = 0; j < p->len; j++) {
151 ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
152 HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
153 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
154 index++;
155 }
156 }
157
158 i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
159 HDMI_I2C_TRANSACTION_REG_RW(
160 (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
161 HDMI_I2C_TRANSACTION_REG_START;
162
163 if (i == (num - 1))
164 i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
165
166 hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
167 }
168
169 /* trigger the transfer: */
170 hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
171 HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
172 HDMI_DDC_CTRL_GO);
173
174 ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
175 if (ret <= 0) {
176 if (ret == 0)
177 ret = -ETIMEDOUT;
178 dev_warn(dev->dev, "DDC timeout: %d\n", ret);
179 DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
180 hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
181 hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
182 hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
183 return ret;
184 }
185
186 ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
187
188 /* read back results of any read transactions: */
189 for (i = 0; i < num; i++) {
190 struct i2c_msg *p = &msgs[i];
191
192 if (!(p->flags & I2C_M_RD))
193 continue;
194
195 /* check for NACK: */
196 if (ddc_status & nack[i]) {
197 DBG("ddc_status=%08x", ddc_status);
198 break;
199 }
200
201 ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
202 HDMI_DDC_DATA_INDEX(indices[i]) |
203 HDMI_DDC_DATA_INDEX_WRITE;
204
205 hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
206
207 /* discard first byte: */
208 hdmi_read(hdmi, REG_HDMI_DDC_DATA);
209
210 for (j = 0; j < p->len; j++) {
211 ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
212 p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
213 }
214 }
215
216 return i;
217}
218
219static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
220{
221 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
222}
223
224static const struct i2c_algorithm hdmi_i2c_algorithm = {
225 .master_xfer = hdmi_i2c_xfer,
226 .functionality = hdmi_i2c_func,
227};
228
229void hdmi_i2c_irq(struct i2c_adapter *i2c)
230{
231 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
232
233 if (sw_done(hdmi_i2c))
234 wake_up_all(&hdmi_i2c->ddc_event);
235}
236
237void hdmi_i2c_destroy(struct i2c_adapter *i2c)
238{
239 struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
240 i2c_del_adapter(i2c);
241 kfree(hdmi_i2c);
242}
243
244struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
245{
246 struct drm_device *dev = hdmi->dev;
247 struct hdmi_i2c_adapter *hdmi_i2c;
248 struct i2c_adapter *i2c = NULL;
249 int ret;
250
251 hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
252 if (!hdmi_i2c) {
253 ret = -ENOMEM;
254 goto fail;
255 }
256
257 i2c = &hdmi_i2c->base;
258
259 hdmi_i2c->hdmi = hdmi;
260 init_waitqueue_head(&hdmi_i2c->ddc_event);
261
262
263 i2c->owner = THIS_MODULE;
264 i2c->class = I2C_CLASS_DDC;
265 snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
266 i2c->dev.parent = &hdmi->pdev->dev;
267 i2c->algo = &hdmi_i2c_algorithm;
268
269 ret = i2c_add_adapter(i2c);
270 if (ret) {
271 dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
272 goto fail;
273 }
274
275 return i2c;
276
277fail:
278 if (i2c)
279 hdmi_i2c_destroy(i2c);
280 return ERR_PTR(ret);
281}
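The loop in hdmi_i2c_xfer() above maps each struct i2c_msg onto one HDMI_I2C_TRANSACTION slot: the address byte (and, for writes, the payload) is queued through REG_HDMI_DDC_DATA, while reads only reserve an index range that is fetched back once the GO bit completes. A minimal sketch of the message pair that an EDID block read (e.g. via drm_get_edid() above) ends up submitting; 0x50 is the standard DDC slave address, and the surrounding function plus error handling are omitted:

	uint8_t offset = 0;		/* start of EDID block 0 */
	uint8_t edid_buf[128];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
	};
	int ret = i2c_transfer(hdmi->i2c, msgs, ARRAY_SIZE(msgs));
	/* on success ret == 2: hdmi_i2c_xfer() returns the number of
	 * messages it completed */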
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 000000000000..e5b7ed5b8f01
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8960 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
25
26static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
29 kfree(phy_8960);
30}
31
32static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
35 struct hdmi *hdmi = phy_8960->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
51 /* pull low */
52 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
53 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
54 } else {
55 /* pull high */
56 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
57 val | HDMI_PHY_CTRL_SW_RESET_PLL);
58 }
59
60 msleep(100);
61
62 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
63 /* pull high */
64 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
65 val | HDMI_PHY_CTRL_SW_RESET);
66 } else {
67 /* pull low */
68 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
69 val & ~HDMI_PHY_CTRL_SW_RESET);
70 }
71
72 if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
73 /* pull high */
74 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
75 val | HDMI_PHY_CTRL_SW_RESET_PLL);
76 } else {
77 /* pull low */
78 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
79 val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
80 }
81}
82
83static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
84 unsigned long int pixclock)
85{
86 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
87 struct hdmi *hdmi = phy_8960->hdmi;
88
89 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
90 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
91 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
92 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
93 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
94 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
95 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
96 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
97 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
98 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
99 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
100}
101
102static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
103{
104 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
105 struct hdmi *hdmi = phy_8960->hdmi;
106
107 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
108}
109
110static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
111 .destroy = hdmi_phy_8960_destroy,
112 .reset = hdmi_phy_8960_reset,
113 .powerup = hdmi_phy_8960_powerup,
114 .powerdown = hdmi_phy_8960_powerdown,
115};
116
117struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
118{
119 struct hdmi_phy_8960 *phy_8960;
120 struct hdmi_phy *phy = NULL;
121 int ret;
122
123 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
124 if (!phy_8960) {
125 ret = -ENOMEM;
126 goto fail;
127 }
128
129 phy = &phy_8960->base;
130
131 phy->funcs = &hdmi_phy_8960_funcs;
132
133 phy_8960->hdmi = hdmi;
134
135 return phy;
136
137fail:
138 if (phy)
139 hdmi_phy_8960_destroy(phy);
140 return ERR_PTR(ret);
141}
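A minimal usage sketch of the 8960 phy above, driven through its funcs table the same way hdmi_connector_dpms() does (how the core decides which phy variant to instantiate is not shown here; 148500000 is the 1080p60 pixel clock):

	struct hdmi_phy *phy = hdmi_phy_8960_init(hdmi);

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	phy->funcs->reset(phy);
	phy->funcs->powerup(phy, 148500000);
	/* ... scanout ... */
	phy->funcs->powerdown(phy);
	phy->funcs->destroy(phy);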
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 000000000000..391433c1af7c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "hdmi.h"
19
20struct hdmi_phy_8x60 {
21 struct hdmi_phy base;
22 struct hdmi *hdmi;
23};
24#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
25
26static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
27{
28 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
29 kfree(phy_8x60);
30}
31
32static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
33{
34 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
35 struct hdmi *hdmi = phy_8x60->hdmi;
36 unsigned int val;
37
38 val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
39
40 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
41 /* pull low */
42 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
43 val & ~HDMI_PHY_CTRL_SW_RESET);
44 } else {
45 /* pull high */
46 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
47 val | HDMI_PHY_CTRL_SW_RESET);
48 }
49
50 msleep(100);
51
52 if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
53 /* pull high */
54 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
55 val | HDMI_PHY_CTRL_SW_RESET);
56 } else {
57 /* pull low */
58 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
59 val & ~HDMI_PHY_CTRL_SW_RESET);
60 }
61}
62
63static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
64 unsigned long int pixclock)
65{
66 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
67 struct hdmi *hdmi = phy_8x60->hdmi;
68
69 /* De-serializer delay D/C for non-lbk mode: */
70 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
71 HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
72
73 if (pixclock == 27000000) {
74 /* video_format == HDMI_VFRMT_720x480p60_16_9 */
75 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
76 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
77 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
78 } else {
79 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
80 HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
81 HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
82 }
83
84 /* No matter what, start from the power down mode: */
85 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
86 HDMI_8x60_PHY_REG2_PD_PWRGEN |
87 HDMI_8x60_PHY_REG2_PD_PLL |
88 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
89 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
90 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
91 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
92 HDMI_8x60_PHY_REG2_PD_DESER);
93
94 /* Turn PowerGen on: */
95 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
96 HDMI_8x60_PHY_REG2_PD_PLL |
97 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
98 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
99 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
100 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
101 HDMI_8x60_PHY_REG2_PD_DESER);
102
103 /* Turn PLL power on: */
104 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
105 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
106 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
107 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
108 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
109 HDMI_8x60_PHY_REG2_PD_DESER);
110
111 /* Write to HIGH after PLL power down de-assert: */
112 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
113 HDMI_8x60_PHY_REG3_PLL_ENABLE);
114
115 /* ASIC power on; PHY REG9 = 0 */
116 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
117
118	/* Enable PLL lock detect; PLL lock det will go high after lock.
119	 * Enable the re-time logic.
120 */
121 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
122 HDMI_8x60_PHY_REG12_RETIMING_EN |
123 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
124
125 /* Drivers are on: */
126 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
127 HDMI_8x60_PHY_REG2_PD_DESER);
128
129 /* If the RX detector is needed: */
130 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
131 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
132 HDMI_8x60_PHY_REG2_PD_DESER);
133
134 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
135 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
136 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
137 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
138 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
139 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
140 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
141 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
142
143 /* If we want to use lock enable based on counting: */
144 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
145 HDMI_8x60_PHY_REG12_RETIMING_EN |
146 HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
147 HDMI_8x60_PHY_REG12_FORCE_LOCK);
148}
149
150static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
151{
152 struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
153 struct hdmi *hdmi = phy_8x60->hdmi;
154
155 /* Assert RESET PHY from controller */
156 hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
157 HDMI_PHY_CTRL_SW_RESET);
158 udelay(10);
159 /* De-assert RESET PHY from controller */
160 hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
161 /* Turn off Driver */
162 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
163 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
164 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
165 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
166 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
167 HDMI_8x60_PHY_REG2_PD_DESER);
168 udelay(10);
169 /* Disable PLL */
170 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
171 /* Power down PHY, but keep RX-sense: */
172 hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
173 HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
174 HDMI_8x60_PHY_REG2_PD_PWRGEN |
175 HDMI_8x60_PHY_REG2_PD_PLL |
176 HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
177 HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
178 HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
179 HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
180 HDMI_8x60_PHY_REG2_PD_DESER);
181}
182
183static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
184 .destroy = hdmi_phy_8x60_destroy,
185 .reset = hdmi_phy_8x60_reset,
186 .powerup = hdmi_phy_8x60_powerup,
187 .powerdown = hdmi_phy_8x60_powerdown,
188};
189
190struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
191{
192 struct hdmi_phy_8x60 *phy_8x60;
193 struct hdmi_phy *phy = NULL;
194 int ret;
195
196 phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
197 if (!phy_8x60) {
198 ret = -ENOMEM;
199 goto fail;
200 }
201
202 phy = &phy_8x60->base;
203
204 phy->funcs = &hdmi_phy_8x60_funcs;
205
206 phy_8x60->hdmi = hdmi;
207
208 return phy;
209
210fail:
211 if (phy)
212 hdmi_phy_8x60_destroy(phy);
213 return ERR_PTR(ret);
214}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 000000000000..bee36363bcd0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
1#ifndef QFPROM_XML
2#define QFPROM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
46#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
47#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
48
49
50#endif /* QFPROM_XML */
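The two fuse bits above allow the driver to check at runtime whether HDMI or HDCP support has been fused off. A usage sketch, assuming qfprom_base is an ioremap()'d mapping of the QFPROM region (obtaining that mapping is not shown here):

	u32 row0 = readl(qfprom_base + REG_QFPROM_CONFIG_ROW0_LSB);
	bool hdmi_fused_off = row0 & QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE;
	bool hdcp_fused_off = row0 & QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE;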
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 000000000000..bbeeebe2db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
1#ifndef MDP4_XML
2#define MDP4_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
14- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
15- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
17- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
18- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
19
20Copyright (C) 2013 by the following authors:
21- Rob Clark <robdclark@gmail.com> (robclark)
22
23Permission is hereby granted, free of charge, to any person obtaining
24a copy of this software and associated documentation files (the
25"Software"), to deal in the Software without restriction, including
26without limitation the rights to use, copy, modify, merge, publish,
27distribute, sublicense, and/or sell copies of the Software, and to
28permit persons to whom the Software is furnished to do so, subject to
29the following conditions:
30
31The above copyright notice and this permission notice (including the
32next paragraph) shall be included in all copies or substantial
33portions of the Software.
34
35THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
36EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
37MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
38IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
39LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
40OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
41WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42*/
43
44
45enum mpd4_bpc {
46 BPC1 = 0,
47 BPC5 = 1,
48 BPC6 = 2,
49 BPC8 = 3,
50};
51
52enum mpd4_bpc_alpha {
53 BPC1A = 0,
54 BPC4A = 1,
55 BPC6A = 2,
56 BPC8A = 3,
57};
58
59enum mpd4_alpha_type {
60 FG_CONST = 0,
61 BG_CONST = 1,
62 FG_PIXEL = 2,
63 BG_PIXEL = 3,
64};
65
66enum mpd4_pipe {
67 VG1 = 0,
68 VG2 = 1,
69 RGB1 = 2,
70 RGB2 = 3,
71 RGB3 = 4,
72 VG3 = 5,
73 VG4 = 6,
74};
75
76enum mpd4_mixer {
77 MIXER0 = 0,
78 MIXER1 = 1,
79 MIXER2 = 2,
80};
81
82enum mpd4_mixer_stage_id {
83 STAGE_UNUSED = 0,
84 STAGE_BASE = 1,
85 STAGE0 = 2,
86 STAGE1 = 3,
87 STAGE2 = 4,
88 STAGE3 = 5,
89};
90
91enum mdp4_intf {
92 INTF_LCDC_DTV = 0,
93 INTF_DSI_VIDEO = 1,
94 INTF_DSI_CMD = 2,
95 INTF_EBI2_TV = 3,
96};
97
98enum mdp4_cursor_format {
99 CURSOR_ARGB = 1,
100 CURSOR_XRGB = 2,
101};
102
103enum mdp4_dma {
104 DMA_P = 0,
105 DMA_S = 1,
106 DMA_E = 2,
107};
108
109#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
110#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
111#define MDP4_IRQ_DMA_S_DONE 0x00000004
112#define MDP4_IRQ_DMA_E_DONE 0x00000008
113#define MDP4_IRQ_DMA_P_DONE 0x00000010
114#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
115#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
116#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
117#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
118#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
119#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
120#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
121#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
122#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
123#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
124#define REG_MDP4_VERSION 0x00000000
125#define MDP4_VERSION_MINOR__MASK 0x00ff0000
126#define MDP4_VERSION_MINOR__SHIFT 16
127static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
128{
129 return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
130}
131#define MDP4_VERSION_MAJOR__MASK 0xff000000
132#define MDP4_VERSION_MAJOR__SHIFT 24
133static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
134{
135 return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
136}
137
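Each register field in this header comes as a *__MASK/*__SHIFT pair plus an inline packing helper; decoding a value read back from the hardware goes the other way with the raw mask and shift. A small sketch, assuming version holds the value read from REG_MDP4_VERSION:

	uint32_t major = (version & MDP4_VERSION_MAJOR__MASK) >> MDP4_VERSION_MAJOR__SHIFT;
	uint32_t minor = (version & MDP4_VERSION_MINOR__MASK) >> MDP4_VERSION_MINOR__SHIFT;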
138#define REG_MDP4_OVLP0_KICK 0x00000004
139
140#define REG_MDP4_OVLP1_KICK 0x00000008
141
142#define REG_MDP4_OVLP2_KICK 0x000000d0
143
144#define REG_MDP4_DMA_P_KICK 0x0000000c
145
146#define REG_MDP4_DMA_S_KICK 0x00000010
147
148#define REG_MDP4_DMA_E_KICK 0x00000014
149
150#define REG_MDP4_DISP_STATUS 0x00000018
151
152#define REG_MDP4_DISP_INTF_SEL 0x00000038
153#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
154#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
155static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
156{
157 return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
158}
159#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
160#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
161static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
162{
163 return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
164}
165#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
166#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
167static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
168{
169 return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
170}
171#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
172#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
173
174#define REG_MDP4_RESET_STATUS 0x0000003c
175
176#define REG_MDP4_READ_CNFG 0x0000004c
177
178#define REG_MDP4_INTR_ENABLE 0x00000050
179
180#define REG_MDP4_INTR_STATUS 0x00000054
181
182#define REG_MDP4_INTR_CLEAR 0x00000058
183
184#define REG_MDP4_EBI2_LCD0 0x00000060
185
186#define REG_MDP4_EBI2_LCD1 0x00000064
187
188#define REG_MDP4_PORTMAP_MODE 0x00000070
189
190#define REG_MDP4_CS_CONTROLLER0 0x000000c0
191
192#define REG_MDP4_CS_CONTROLLER1 0x000000c4
193
194#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
195#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
196#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
197static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
198{
199 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
200}
201#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
202#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
203#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
204static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
205{
206 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
207}
208#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
209#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
210#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
211static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
212{
213 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
214}
215#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
216#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
217#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
218static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
219{
220 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
221}
222#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
223#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
224#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
225static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
226{
227 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
228}
229#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
230#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
231#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
232static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
233{
234 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
235}
236#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
237#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
238#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
239static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
240{
241 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
242}
243#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
244#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
245#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
246static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
247{
248 return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
249}
250#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
251
252#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
253
254#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
255#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
256#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
257static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
258{
259 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
260}
261#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
262#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
263#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
264static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
265{
266 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
267}
268#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
269#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
270#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
271static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
272{
273 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
274}
275#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
276#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
277#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
278static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
279{
280 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
281}
282#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
283#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
284#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
285static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
286{
287 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
288}
289#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
290#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
291#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
292static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
293{
294 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
295}
296#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
297#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
298#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
299static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
300{
301 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
302}
303#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
304#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
305#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
306static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
307{
308 return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
309}
310#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
311
312#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
313
314#define REG_MDP4_VG2_CONST_COLOR 0x00031008
315
316#define REG_MDP4_OVERLAY_FLUSH 0x00018000
317#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
318#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
319#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
320#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
321#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
322#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
323
324static inline uint32_t __offset_OVLP(uint32_t idx)
325{
326 switch (idx) {
327 case 0: return 0x00010000;
328 case 1: return 0x00018000;
329 case 2: return 0x00088000;
330 default: return INVALID_IDX(idx);
331 }
332}
333static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
334
335static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
336
337static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
338#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
339#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
340static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
341{
342 return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
343}
344#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
345#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
346static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
347{
348 return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
349}
350
351static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
352
353static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
354
355static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
356
357static inline uint32_t __offset_STAGE(uint32_t idx)
358{
359 switch (idx) {
360 case 0: return 0x00000104;
361 case 1: return 0x00000124;
362 case 2: return 0x00000144;
363 case 3: return 0x00000160;
364 default: return INVALID_IDX(idx);
365 }
366}
367static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
368
369static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
370#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
371#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
372static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
373{
374 return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
375}
376#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
377#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
378#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
379#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
380static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
381{
382 return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
383}
384#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
385#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
386#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
387#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
388
389static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
390
391static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
392
393static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
394
395static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
396
397static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
398
399static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
400
401static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
402{
403 switch (idx) {
404 case 0: return 0x00001004;
405 case 1: return 0x00001404;
406 case 2: return 0x00001804;
407 case 3: return 0x00001b84;
408 default: return INVALID_IDX(idx);
409 }
410}
411static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
412
413static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
414#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
415
416static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
417
418static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
419
420static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
421
422static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
423
424static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
425
426static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
427
428
429static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
430
431static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
432
433static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
434
435static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
436
437static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
438
439static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
440
441static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
442
443static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
444
445static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
446
447static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
448
449#define REG_MDP4_DMA_P_OP_MODE 0x00090070
450
451static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
452
453static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
454
455static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
456
457#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
458
459static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
460
461static inline uint32_t __offset_DMA(enum mdp4_dma idx)
462{
463 switch (idx) {
464 case DMA_P: return 0x00090000;
465 case DMA_S: return 0x000a0000;
466 case DMA_E: return 0x000b0000;
467 default: return INVALID_IDX(idx);
468 }
469}
470static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
471
472static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
473#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
474#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
475static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
476{
477 return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
478}
479#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
480#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
481static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
482{
483 return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
484}
485#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
486#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
487static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
488{
489 return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
490}
491#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
492#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
493#define MDP4_DMA_CONFIG_PACK__SHIFT 8
494static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
495{
496 return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
497}
498#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
499#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
500
501static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
502#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
503#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
504static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
505{
506 return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
507}
508#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
509#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
510static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
511{
512 return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
513}
514
515static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
516
517static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
518
519static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
520#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
521#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
522static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
523{
524 return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
525}
526#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
527#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
528static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
529{
530 return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
531}
532
533static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
534#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
535#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
536static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
537{
538 return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
539}
540#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
541#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
542static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
543{
544 return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
545}
546
547static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
548
549static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
550#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
551#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
552static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
553{
554 return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
555}
556#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
557#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
558static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
559{
560 return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
561}
562
563static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
564#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
565#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
566#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
567static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
568{
569 return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
570}
571#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
572
573static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
574
575static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
576
577static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
578
579static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
580
581static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
582
583
584static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
585
586static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
587
588static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
589
590static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
591
592static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
593
594static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
595
596static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
597
598static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
599
600static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
601
602static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
603
604static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
605
606static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
607#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
608#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
609static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
610{
611 return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
612}
613#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
614#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
615static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
616{
617 return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
618}
619
620static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
621#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
622#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
623static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
624{
625 return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
626}
627#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
628#define MDP4_PIPE_SRC_XY_X__SHIFT 0
629static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
630{
631 return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
632}
633
634static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
635#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
636#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
637static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
638{
639 return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
640}
641#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
642#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
643static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
644{
645 return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
646}
647
648static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
649#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
650#define MDP4_PIPE_DST_XY_Y__SHIFT 16
651static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
652{
653 return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
654}
655#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
656#define MDP4_PIPE_DST_XY_X__SHIFT 0
657static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
658{
659 return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
660}
661
662static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
663
664static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
665
666static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
667
668static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
669#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
670#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
671static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
672{
673 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
674}
675#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
676#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
677static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
678{
679 return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
680}
681
682static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
683#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
684#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
685static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
686{
687 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
688}
689#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
690#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
691static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
692{
693 return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
694}
695
696static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
697#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
698#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
699static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
700{
701 return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
702}
703#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
704#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
705static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
706{
707 return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
708}
709
710static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
711#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
712#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
713static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
714{
715 return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
716}
717#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
718#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
719static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
720{
721 return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
722}
723#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
724#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
725static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
726{
727 return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
728}
729#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
730#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
731static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
732{
733 return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
734}
735#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
736#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
737#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
738static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
739{
740 return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
741}
742#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
743#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
744#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
745static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
746{
747 return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
748}
749#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
750#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
751#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
752
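/* Illustrative only -- not part of the generated header: the __SHIFT/__MASK
 * helpers above compose self-masking register values, e.g. a tightly packed
 * XRGB8888 source format could be described as:
 *
 *   MDP4_PIPE_SRC_FORMAT_A_BPC(BPC8A) |
 *   MDP4_PIPE_SRC_FORMAT_R_BPC(BPC8) |
 *   MDP4_PIPE_SRC_FORMAT_G_BPC(BPC8) |
 *   MDP4_PIPE_SRC_FORMAT_B_BPC(BPC8) |
 *   MDP4_PIPE_SRC_FORMAT_CPP(3) |            (assuming CPP is encoded as bytes-per-pixel minus one)
 *   MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(3) |   (assuming the count is encoded as elements minus one)
 *   MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT
 */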
753static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
754#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
755#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
756static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
757{
758 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
759}
760#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
761#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
762static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
763{
764 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
765}
766#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
767#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
768static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
769{
770 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
771}
772#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
773#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
774static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
775{
776 return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
777}
778
779static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
780#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
781#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
782#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
783#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
784#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
785#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
786#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
787#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
788#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
789#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
790#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
791
792static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
793
794static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
795
796static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
797
798static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
799
800static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
801
802
803static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
804
805static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
806
807static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
808
809static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
810
811static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
812
813static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
814
815static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
816
817static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
818
819static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
820
821static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
822
823#define REG_MDP4_LCDC 0x000c0000
824
825#define REG_MDP4_LCDC_ENABLE 0x000c0000
826
827#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
828#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
829#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
830static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
831{
832 return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
833}
834#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
835#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
836static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
837{
838 return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
839}
840
841#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
842
843#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
844
845#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
846#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
847#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
848static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
849{
850 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
851}
852#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
853#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
854static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
855{
856 return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
857}
858
859#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
860
861#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
862
863#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
864#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
865#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
866static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
867{
868 return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
869}
870#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
871#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
872static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
873{
874 return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
875}
876#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
877
878#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
879
880#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
881
882#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
883
884#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
885#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
886#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
887static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
888{
889 return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
890}
891#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
892
893#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
894
895#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
896
897#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
898#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
899#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
900#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
901
902#define REG_MDP4_DTV 0x000d0000
903
904#define REG_MDP4_DTV_ENABLE 0x000d0000
905
906#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
907#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
908#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
909static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
910{
911 return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
912}
913#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
914#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
915static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
916{
917 return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
918}
919
920#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
921
922#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
923
924#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
925#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
926#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
927static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
928{
929 return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
930}
931#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
932#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
933static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
934{
935 return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
936}
937
938#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
939
940#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
941
942#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
943#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
944#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
945static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
946{
947 return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
948}
949#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
950#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
951static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
952{
953 return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
954}
955#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
956
957#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
958
959#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
960
961#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
962
963#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
964#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
965#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
966static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
967{
968 return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
969}
970#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
971
972#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
973
974#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
975
976#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
977#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
978#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
979#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
980
981#define REG_MDP4_DSI 0x000e0000
982
983#define REG_MDP4_DSI_ENABLE 0x000e0000
984
985#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
986#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
987#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
988static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
989{
990 return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
991}
992#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
993#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
994static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
995{
996 return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
997}
998
999#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
1000
1001#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
1002
1003#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
1004#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
1005#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
1006static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
1007{
1008 return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
1009}
1010#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
1011#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
1012static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
1013{
1014 return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
1015}
1016
1017#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
1018
1019#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
1020
1021#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
1022#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
1023#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
1024static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
1025{
1026 return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
1027}
1028#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
1029#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
1030static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
1031{
1032 return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
1033}
1034#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
1035
1036#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
1037
1038#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
1039
1040#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
1041
1042#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
1043#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
1044#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
1045static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
1046{
1047 return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
1048}
1049#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
1050
1051#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
1052
1053#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
1054
1055#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
1056#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
1057#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
1058#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
1059
1060
1061#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 000000000000..bda0fc40b207
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,684 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20#include <drm/drm_mode.h>
21#include "drm_crtc.h"
22#include "drm_crtc_helper.h"
23#include "drm_flip_work.h"
24
25struct mdp4_crtc {
26 struct drm_crtc base;
27 char name[8];
28 struct drm_plane *plane;
29 int id;
30 int ovlp;
31 enum mdp4_dma dma;
32 bool enabled;
33
34 /* which mixer/encoder we route output to: */
35 int mixer;
36
37 struct {
38 spinlock_t lock;
39 bool stale;
40 uint32_t width, height;
41
42 /* next cursor to scan-out: */
43 uint32_t next_iova;
44 struct drm_gem_object *next_bo;
45
46 /* current cursor being scanned out: */
47 struct drm_gem_object *scanout_bo;
48 } cursor;
49
50
51 /* if there is a pending flip, these will be non-null: */
52 struct drm_pending_vblank_event *event;
53 struct work_struct pageflip_work;
54
55 /* the fb that we currently hold a scanout ref to: */
56 struct drm_framebuffer *fb;
57
58 /* for unref'ing framebuffers after scanout completes: */
59 struct drm_flip_work unref_fb_work;
60
61 /* for unref'ing cursor bo's after scanout completes: */
62 struct drm_flip_work unref_cursor_work;
63
64 struct mdp4_irq vblank;
65 struct mdp4_irq err;
66};
67#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
68
69static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
70{
71 struct msm_drm_private *priv = crtc->dev->dev_private;
72 return to_mdp4_kms(priv->kms);
73}
74
75static void update_fb(struct drm_crtc *crtc, bool async,
76 struct drm_framebuffer *new_fb)
77{
78 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
79 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
80
81 if (old_fb)
82 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
83
84 /* grab reference to incoming scanout fb: */
85 drm_framebuffer_reference(new_fb);
86 mdp4_crtc->base.fb = new_fb;
87 mdp4_crtc->fb = new_fb;
88
89 if (!async) {
90 /* enable vblank to pick up the old_fb */
91 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
92 }
93}
94
95static void complete_flip(struct drm_crtc *crtc, bool canceled)
96{
97 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
98 struct drm_device *dev = crtc->dev;
99 struct drm_pending_vblank_event *event;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev->event_lock, flags);
103 event = mdp4_crtc->event;
104 if (event) {
105 mdp4_crtc->event = NULL;
106 if (canceled)
107 event->base.destroy(&event->base);
108 else
109 drm_send_vblank_event(dev, mdp4_crtc->id, event);
110 }
111 spin_unlock_irqrestore(&dev->event_lock, flags);
112}
113
114static void crtc_flush(struct drm_crtc *crtc)
115{
116 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
117 struct mdp4_kms *mdp4_kms = get_kms(crtc);
118 uint32_t flush = 0;
119
120 flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
121 flush |= ovlp2flush(mdp4_crtc->ovlp);
122
123 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
124
125 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
126}
127
128static void pageflip_worker(struct work_struct *work)
129{
130 struct mdp4_crtc *mdp4_crtc =
131 container_of(work, struct mdp4_crtc, pageflip_work);
132 struct drm_crtc *crtc = &mdp4_crtc->base;
133
134 mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
135 crtc_flush(crtc);
136
137 /* enable vblank to complete flip: */
138 mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
139}
140
141static void unref_fb_worker(struct drm_flip_work *work, void *val)
142{
143 struct mdp4_crtc *mdp4_crtc =
144 container_of(work, struct mdp4_crtc, unref_fb_work);
145 struct drm_device *dev = mdp4_crtc->base.dev;
146
147 mutex_lock(&dev->mode_config.mutex);
148 drm_framebuffer_unreference(val);
149 mutex_unlock(&dev->mode_config.mutex);
150}
151
152static void unref_cursor_worker(struct drm_flip_work *work, void *val)
153{
154 struct mdp4_crtc *mdp4_crtc =
155 container_of(work, struct mdp4_crtc, unref_cursor_work);
156 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
157
158 msm_gem_put_iova(val, mdp4_kms->id);
159 drm_gem_object_unreference_unlocked(val);
160}
161
162static void mdp4_crtc_destroy(struct drm_crtc *crtc)
163{
164 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
165
166 mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
167
168 drm_crtc_cleanup(crtc);
169 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
170 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
171
172 kfree(mdp4_crtc);
173}
174
175static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
176{
177 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
178 struct mdp4_kms *mdp4_kms = get_kms(crtc);
179 bool enabled = (mode == DRM_MODE_DPMS_ON);
180
181 DBG("%s: mode=%d", mdp4_crtc->name, mode);
182
183 if (enabled != mdp4_crtc->enabled) {
184 if (enabled) {
185 mdp4_enable(mdp4_kms);
186 mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
187 } else {
188 mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
189 mdp4_disable(mdp4_kms);
190 }
191 mdp4_crtc->enabled = enabled;
192 }
193}
194
195static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
196 const struct drm_display_mode *mode,
197 struct drm_display_mode *adjusted_mode)
198{
199 return true;
200}
201
202static void blend_setup(struct drm_crtc *crtc)
203{
204 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
205 struct mdp4_kms *mdp4_kms = get_kms(crtc);
206 int i, ovlp = mdp4_crtc->ovlp;
207 uint32_t mixer_cfg = 0;
208
209 /*
210 * This probably would also need to be triggered by any attached
211 * plane when it changes.. for now since we are only using a single
212 * private plane, the configuration is hard-coded:
213 */
214
215 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
216 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
217 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
218 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
219
220 for (i = 0; i < 4; i++) {
221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
223 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
224 MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
225 MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
226 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
227 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
228 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
229 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
230 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
231 }
232
233 /* TODO single register for all CRTCs, so this won't work properly
234 * when multiple CRTCs are active..
235 */
236 switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
237 case VG1:
238 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
239 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
240 break;
241 case VG2:
242 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
243 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
244 break;
245 case RGB1:
246 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
247 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
248 break;
249 case RGB2:
250 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
251 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
252 break;
253 case RGB3:
254 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
255 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
256 break;
257 case VG3:
258 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
259 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
260 break;
261 case VG4:
262 mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
263 COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
264 break;
265 default:
266 WARN_ON("invalid pipe");
267 break;
268 }
269 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
270}
271
272static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
273 struct drm_display_mode *mode,
274 struct drm_display_mode *adjusted_mode,
275 int x, int y,
276 struct drm_framebuffer *old_fb)
277{
278 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
279 struct mdp4_kms *mdp4_kms = get_kms(crtc);
280 enum mdp4_dma dma = mdp4_crtc->dma;
281 int ret, ovlp = mdp4_crtc->ovlp;
282
283 mode = adjusted_mode;
284
285 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
286 mdp4_crtc->name, mode->base.id, mode->name,
287 mode->vrefresh, mode->clock,
288 mode->hdisplay, mode->hsync_start,
289 mode->hsync_end, mode->htotal,
290 mode->vdisplay, mode->vsync_start,
291 mode->vsync_end, mode->vtotal,
292 mode->type, mode->flags);
293
294 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
295 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
296 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
297
298 /* take data from pipe: */
299 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
300 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
301 crtc->fb->pitches[0]);
302 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
303 MDP4_DMA_DST_SIZE_WIDTH(0) |
304 MDP4_DMA_DST_SIZE_HEIGHT(0));
305
306 mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
307 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
308 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
309 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
310 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
311 crtc->fb->pitches[0]);
312
313 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
314
315 update_fb(crtc, false, crtc->fb);
316
317 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
318 0, 0, mode->hdisplay, mode->vdisplay,
319 x << 16, y << 16,
320 mode->hdisplay << 16, mode->vdisplay << 16);
321 if (ret) {
322 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
323 mdp4_crtc->name, ret);
324 return ret;
325 }
326
327 if (dma == DMA_E) {
328 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
329 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
330 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
331 }
332
333 return 0;
334}
335
336static void mdp4_crtc_prepare(struct drm_crtc *crtc)
337{
338 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
339 DBG("%s", mdp4_crtc->name);
340 /* make sure we hold a ref to mdp clks while setting up mode: */
341 mdp4_enable(get_kms(crtc));
342 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
343}
344
345static void mdp4_crtc_commit(struct drm_crtc *crtc)
346{
347 mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
348 crtc_flush(crtc);
349 /* drop the ref to mdp clks that we got in prepare: */
350 mdp4_disable(get_kms(crtc));
351}
352
353static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
354 struct drm_framebuffer *old_fb)
355{
356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
357 struct drm_plane *plane = mdp4_crtc->plane;
358 struct drm_display_mode *mode = &crtc->mode;
359
360 update_fb(crtc, false, crtc->fb);
361
362 return mdp4_plane_mode_set(plane, crtc, crtc->fb,
363 0, 0, mode->hdisplay, mode->vdisplay,
364 x << 16, y << 16,
365 mode->hdisplay << 16, mode->vdisplay << 16);
366}
367
368static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
369{
370}
371
372static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
373 struct drm_framebuffer *new_fb,
374 struct drm_pending_vblank_event *event)
375{
376 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
377 struct drm_device *dev = crtc->dev;
378 struct drm_gem_object *obj;
379
380 if (mdp4_crtc->event) {
381 dev_err(dev->dev, "already pending flip!\n");
382 return -EBUSY;
383 }
384
385 obj = msm_framebuffer_bo(new_fb, 0);
386
387 mdp4_crtc->event = event;
388 update_fb(crtc, true, new_fb);
389
390 return msm_gem_queue_inactive_work(obj,
391 &mdp4_crtc->pageflip_work);
392}
393
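/*
 * Flip flow, for reference: mdp4_crtc_page_flip() stashes the vblank
 * event, grabs a ref on the new fb via update_fb(), and queues
 * pageflip_worker() to run once the new fb's GEM bo is idle.  The
 * worker points the plane at the new scanout, flushes, and registers
 * the vblank irq; mdp4_crtc_vblank_irq() then delivers the event via
 * complete_flip() and commits the deferred fb/cursor unrefs.
 */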
394static int mdp4_crtc_set_property(struct drm_crtc *crtc,
395 struct drm_property *property, uint64_t val)
396{
397 // XXX
398 return -EINVAL;
399}
400
401#define CURSOR_WIDTH 64
402#define CURSOR_HEIGHT 64
403
404/* called from IRQ to update cursor-related registers (if needed). The
405 * cursor registers, other than x/y position, appear not to be double
406 * buffered, and changing them other than from vblank seems to trigger
407 * underflow.
408 */
409static void update_cursor(struct drm_crtc *crtc)
410{
411 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
412 enum mdp4_dma dma = mdp4_crtc->dma;
413 unsigned long flags;
414
415 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
416 if (mdp4_crtc->cursor.stale) {
417 struct mdp4_kms *mdp4_kms = get_kms(crtc);
418 struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
419 struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
420 uint32_t iova = mdp4_crtc->cursor.next_iova;
421
422 if (next_bo) {
423 /* take an obj ref + iova ref when we start scanning out: */
424 drm_gem_object_reference(next_bo);
425 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
426
427 /* enable cursor: */
428 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
429 MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
430 MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
431 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
432 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
433 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
434 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
435 } else {
436 /* disable cursor: */
437 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
438 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
439 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
440 }
441
442 /* and drop the iova ref + obj ref when done scanning out: */
443 if (prev_bo)
444 drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
445
446 mdp4_crtc->cursor.scanout_bo = next_bo;
447 mdp4_crtc->cursor.stale = false;
448 }
449 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
450}
451
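/* The cursor_set/cursor_move entry points below only stage state under
 * cursor.lock (next_bo, next_iova, size, stale); update_cursor(), run from
 * the vblank irq, is what actually latches it into the DMA_CURSOR_*
 * registers, per the double-buffering note above.
 */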
452static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
453 struct drm_file *file_priv, uint32_t handle,
454 uint32_t width, uint32_t height)
455{
456 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
457 struct mdp4_kms *mdp4_kms = get_kms(crtc);
458 struct drm_device *dev = crtc->dev;
459 struct drm_gem_object *cursor_bo, *old_bo;
460 unsigned long flags;
461 uint32_t iova;
462 int ret;
463
464 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
465 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
466 return -EINVAL;
467 }
468
469 if (handle) {
470 cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
471 if (!cursor_bo)
472 return -ENOENT;
473 } else {
474 cursor_bo = NULL;
475 }
476
477 if (cursor_bo) {
478 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
479 if (ret)
480 goto fail;
481 } else {
482 iova = 0;
483 }
484
485 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
486 old_bo = mdp4_crtc->cursor.next_bo;
487 mdp4_crtc->cursor.next_bo = cursor_bo;
488 mdp4_crtc->cursor.next_iova = iova;
489 mdp4_crtc->cursor.width = width;
490 mdp4_crtc->cursor.height = height;
491 mdp4_crtc->cursor.stale = true;
492 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
493
494 if (old_bo) {
495 /* drop our previous reference: */
496 msm_gem_put_iova(old_bo, mdp4_kms->id);
497 drm_gem_object_unreference_unlocked(old_bo);
498 }
499
500 return 0;
501
502fail:
503 drm_gem_object_unreference_unlocked(cursor_bo);
504 return ret;
505}
506
507static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
508{
509 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
510 struct mdp4_kms *mdp4_kms = get_kms(crtc);
511 enum mdp4_dma dma = mdp4_crtc->dma;
512
513 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
514 MDP4_DMA_CURSOR_POS_X(x) |
515 MDP4_DMA_CURSOR_POS_Y(y));
516
517 return 0;
518}
519
520static const struct drm_crtc_funcs mdp4_crtc_funcs = {
521 .set_config = drm_crtc_helper_set_config,
522 .destroy = mdp4_crtc_destroy,
523 .page_flip = mdp4_crtc_page_flip,
524 .set_property = mdp4_crtc_set_property,
525 .cursor_set = mdp4_crtc_cursor_set,
526 .cursor_move = mdp4_crtc_cursor_move,
527};
528
529static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
530 .dpms = mdp4_crtc_dpms,
531 .mode_fixup = mdp4_crtc_mode_fixup,
532 .mode_set = mdp4_crtc_mode_set,
533 .prepare = mdp4_crtc_prepare,
534 .commit = mdp4_crtc_commit,
535 .mode_set_base = mdp4_crtc_mode_set_base,
536 .load_lut = mdp4_crtc_load_lut,
537};
538
539static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
540{
541 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
542 struct drm_crtc *crtc = &mdp4_crtc->base;
543 struct msm_drm_private *priv = crtc->dev->dev_private;
544
545 update_cursor(crtc);
546 complete_flip(crtc, false);
547 mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
548
549 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
550 drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
551}
552
553static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
554{
555 struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
556 struct drm_crtc *crtc = &mdp4_crtc->base;
557 DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
558 crtc_flush(crtc);
559}
560
561uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
562{
563 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
564 return mdp4_crtc->vblank.irqmask;
565}
566
567void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
568{
569 complete_flip(crtc, true);
570}
571
572 /* set dma config, i.e. the format the encoder wants. */
573void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
574{
575 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
576 struct mdp4_kms *mdp4_kms = get_kms(crtc);
577
578 mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
579}
580
581/* set interface for routing crtc->encoder: */
582void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
583{
584 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
585 struct mdp4_kms *mdp4_kms = get_kms(crtc);
586 uint32_t intf_sel;
587
588 intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
589
590 switch (mdp4_crtc->dma) {
591 case DMA_P:
592 intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
593 intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
594 break;
595 case DMA_S:
596 intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
597 intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
598 break;
599 case DMA_E:
600 intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
601 intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
602 break;
603 }
604
605 if (intf == INTF_DSI_VIDEO) {
606 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
607 intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
608 mdp4_crtc->mixer = 0;
609 } else if (intf == INTF_DSI_CMD) {
610 intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
611 intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
612 mdp4_crtc->mixer = 0;
613 } else if (intf == INTF_LCDC_DTV) {
614 mdp4_crtc->mixer = 1;
615 }
616
617 blend_setup(crtc);
618
619 DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
620
621 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
622}
623
624static const char *dma_names[] = {
625 "DMA_P", "DMA_S", "DMA_E",
626};
627
628/* initialize crtc */
629struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
630 struct drm_plane *plane, int id, int ovlp_id,
631 enum mdp4_dma dma_id)
632{
633 struct drm_crtc *crtc = NULL;
634 struct mdp4_crtc *mdp4_crtc;
635 int ret;
636
637 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
638 if (!mdp4_crtc) {
639 ret = -ENOMEM;
640 goto fail;
641 }
642
643 crtc = &mdp4_crtc->base;
644
645 mdp4_crtc->plane = plane;
646 mdp4_crtc->plane->crtc = crtc;
647
648 mdp4_crtc->ovlp = ovlp_id;
649 mdp4_crtc->dma = dma_id;
650
651 mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
652 mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
653
654 mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
655 mdp4_crtc->err.irq = mdp4_crtc_err_irq;
656
657 snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
658 dma_names[dma_id], ovlp_id);
659
660 spin_lock_init(&mdp4_crtc->cursor.lock);
661
662 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
663 "unref fb", unref_fb_worker);
664 if (ret)
665 goto fail;
666
667 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
668 "unref cursor", unref_cursor_worker);
 if (ret)
 goto fail;
669
670 INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
671
672 drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
673 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
674
675 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
676
677 return crtc;
678
679fail:
680 if (crtc)
681 mdp4_crtc_destroy(crtc);
682
683 return ERR_PTR(ret);
684}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 000000000000..06d49e309d34
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,317 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <mach/clk.h>
19
20#include "mdp4_kms.h"
21#include "msm_connector.h"
22
23#include "drm_crtc.h"
24#include "drm_crtc_helper.h"
25
26
27struct mdp4_dtv_encoder {
28 struct drm_encoder base;
29 struct clk *src_clk;
30 struct clk *hdmi_clk;
31 struct clk *mdp_clk;
32 unsigned long int pixclock;
33 bool enabled;
34 uint32_t bsc;
35};
36#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
37
38static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
39{
40 struct msm_drm_private *priv = encoder->dev->dev_private;
41 return to_mdp4_kms(priv->kms);
42}
43
44#ifdef CONFIG_MSM_BUS_SCALING
45#include <mach/board.h>
46/* not ironically named at all.. no, really.. */
47static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
48{
49 struct drm_device *dev = mdp4_dtv_encoder->base.dev;
50 struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
51
52 if (!dtv_pdata) {
53 dev_err(dev->dev, "could not find dtv pdata\n");
54 return;
55 }
56
57 if (dtv_pdata->bus_scale_table) {
58 mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
59 dtv_pdata->bus_scale_table);
60 DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
61 DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
62 if (dtv_pdata->lcdc_power_save)
63 dtv_pdata->lcdc_power_save(1);
64 }
65}
66
67static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
68{
69 if (mdp4_dtv_encoder->bsc) {
70 msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
71 mdp4_dtv_encoder->bsc = 0;
72 }
73}
74
75static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
76{
77 if (mdp4_dtv_encoder->bsc) {
78 DBG("set bus scaling: %d", idx);
79 msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
80 }
81}
82#else
83static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
84static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
85static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
86#endif
87
88static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
89{
90 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
91 bs_fini(mdp4_dtv_encoder);
92 drm_encoder_cleanup(encoder);
93 kfree(mdp4_dtv_encoder);
94}
95
96static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
97 .destroy = mdp4_dtv_encoder_destroy,
98};
99
100static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
101{
102 struct drm_device *dev = encoder->dev;
103 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
104 struct msm_connector *msm_connector = get_connector(encoder);
105 struct mdp4_kms *mdp4_kms = get_kms(encoder);
106 bool enabled = (mode == DRM_MODE_DPMS_ON);
107
108 DBG("mode=%d", mode);
109
110 if (enabled == mdp4_dtv_encoder->enabled)
111 return;
112
113 if (enabled) {
114 unsigned long pc = mdp4_dtv_encoder->pixclock;
115 int ret;
116
117 bs_set(mdp4_dtv_encoder, 1);
118
119 if (msm_connector)
120 msm_connector->funcs->dpms(msm_connector, mode);
121
122 DBG("setting src_clk=%lu", pc);
123
124 ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
125 if (ret)
126 dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
127 clk_prepare_enable(mdp4_dtv_encoder->src_clk);
128 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
129 if (ret)
130 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
131 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
132 if (ret)
133 dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
134
135 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
136 } else {
137 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
138
139 /*
140 * Wait for a vsync so we know the ENABLE=0 latched before
141 * the (connector) source of the vsyncs gets disabled,
142 * otherwise we end up in a funny state if we re-enable
143 * before the disable latches, with the result that some of
144 * the setting changes for the new modeset (like the new
145 * scanout buffer) don't latch properly..
146 */
147 mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
148
149 clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
150 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
151 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
152
153 if (msm_connector)
154 msm_connector->funcs->dpms(msm_connector, mode);
155
156 bs_set(mdp4_dtv_encoder, 0);
157 }
158
159 mdp4_dtv_encoder->enabled = enabled;
160}
161
162static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
163 const struct drm_display_mode *mode,
164 struct drm_display_mode *adjusted_mode)
165{
166 return true;
167}
168
169static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
170 struct drm_display_mode *mode,
171 struct drm_display_mode *adjusted_mode)
172{
173 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
174 struct msm_connector *msm_connector = get_connector(encoder);
175 struct mdp4_kms *mdp4_kms = get_kms(encoder);
176 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
177 uint32_t display_v_start, display_v_end;
178 uint32_t hsync_start_x, hsync_end_x;
179
180 mode = adjusted_mode;
181
182 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
183 mode->base.id, mode->name,
184 mode->vrefresh, mode->clock,
185 mode->hdisplay, mode->hsync_start,
186 mode->hsync_end, mode->htotal,
187 mode->vdisplay, mode->vsync_start,
188 mode->vsync_end, mode->vtotal,
189 mode->type, mode->flags);
190
191 mdp4_dtv_encoder->pixclock = mode->clock * 1000;
192
193 DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
194
195 ctrl_pol = 0;
196 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
197 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
198 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
199 ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
200 /* probably need to get DATA_EN polarity from panel.. */
201
202 dtv_hsync_skew = 0; /* get this from panel? */
203
204 hsync_start_x = (mode->htotal - mode->hsync_start);
205 hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
206
207 vsync_period = mode->vtotal * mode->htotal;
208 vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
209 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
210 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
211
212 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
213 MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
214 MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
215 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
216 mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
217 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
218 MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
219 MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
220 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
221 mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
222 mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
223 mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
224 MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
225 MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
226 mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
227 mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
228 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
229 MDP4_DTV_ACTIVE_HCTL_START(0) |
230 MDP4_DTV_ACTIVE_HCTL_END(0));
231 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
232 mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
233
234 if (msm_connector)
235 msm_connector->funcs->mode_set(msm_connector, mode);
236}
237
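/*
 * Worked example (illustrative, assuming standard CEA-861 1080p60 timings:
 * 1920x1080, htotal=2200, hsync 2008-2052, vtotal=1125, vsync 1084-1089,
 * clock=148500 kHz), using the formulas above:
 *
 *   pixclock        = 148500 * 1000                 = 148500000 Hz
 *   hsync pulse     = 2052 - 2008                   = 44 pixels, period = 2200
 *   hsync_start_x   = 2200 - 2008                   = 192
 *   hsync_end_x     = 2200 - (2008 - 1920) - 1      = 2111
 *   vsync_period    = 1125 * 2200                   = 2475000 pixels
 *   vsync_len       = (1089 - 1084) * 2200          = 11000
 *   display_v_start = (1125 - 1084) * 2200          = 90200
 *   display_v_end   = 2475000 - (1084 - 1080) * 2200 - 1 = 2466199
 *
 * i.e. the vertical timings are expressed in pixel-clock units counted from
 * the start of vsync, with dtv_hsync_skew (0 here) added as a fine offset.
 */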
238static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
239{
240 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
241}
242
243static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
244{
245 mdp4_crtc_set_config(encoder->crtc,
246 MDP4_DMA_CONFIG_R_BPC(BPC8) |
247 MDP4_DMA_CONFIG_G_BPC(BPC8) |
248 MDP4_DMA_CONFIG_B_BPC(BPC8) |
249 MDP4_DMA_CONFIG_PACK(0x21));
250 mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
251 mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
252}
253
254static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
255 .dpms = mdp4_dtv_encoder_dpms,
256 .mode_fixup = mdp4_dtv_encoder_mode_fixup,
257 .mode_set = mdp4_dtv_encoder_mode_set,
258 .prepare = mdp4_dtv_encoder_prepare,
259 .commit = mdp4_dtv_encoder_commit,
260};
261
262long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
263{
264 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
265 return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
266}
267
268/* initialize encoder */
269struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
270{
271 struct drm_encoder *encoder = NULL;
272 struct mdp4_dtv_encoder *mdp4_dtv_encoder;
273 int ret;
274
275 mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
276 if (!mdp4_dtv_encoder) {
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 encoder = &mdp4_dtv_encoder->base;
282
283 drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
284 DRM_MODE_ENCODER_TMDS);
285 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
286
287 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
288 if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
289 dev_err(dev->dev, "failed to get src_clk\n");
290 ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
291 goto fail;
292 }
293
294 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
295 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
296 dev_err(dev->dev, "failed to get hdmi_clk\n");
297 ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
298 goto fail;
299 }
300
301 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
302 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
303 dev_err(dev->dev, "failed to get mdp_clk\n");
304 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
305 goto fail;
306 }
307
308 bs_init(mdp4_dtv_encoder);
309
310 return encoder;
311
312fail:
313 if (encoder)
314 mdp4_dtv_encoder_destroy(encoder);
315
316 return ERR_PTR(ret);
317}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 000000000000..7b645f2e837a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
23 .base = { .pixel_format = DRM_FORMAT_ ## name }, \
24 .bpc_a = BPC ## a ## A, \
25 .bpc_r = BPC ## r, \
26 .bpc_g = BPC ## g, \
27 .bpc_b = BPC ## b, \
28 .unpack = { e0, e1, e2, e3 }, \
29 .alpha_enable = alpha, \
30 .unpack_tight = tight, \
31 .cpp = c, \
32 .unpack_count = cnt, \
33 }
34
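/* Illustrative expansion (assuming the BPCn and BPCnA enum values defined in
 * the generated mdp4.xml.h header), the first entry of the formats[] table
 * below,
 *   FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4)
 * becomes roughly:
 *   { .base = { .pixel_format = DRM_FORMAT_ARGB8888 },
 *     .bpc_a = BPC8A, .bpc_r = BPC8, .bpc_g = BPC8, .bpc_b = BPC8,
 *     .unpack = { 1, 0, 2, 3 }, .alpha_enable = true,
 *     .unpack_tight = true, .cpp = 4, .unpack_count = 4 }
 */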
35#define BPC0A 0
36
37static const struct mdp4_format formats[] = {
38 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
39 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
40 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
41 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
42 FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
43 FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
44 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
45};
46
47const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
48{
49 int i;
50 for (i = 0; i < ARRAY_SIZE(formats); i++) {
51 const struct mdp4_format *f = &formats[i];
52 if (f->base.pixel_format == format)
53 return &f->base;
54 }
55 return NULL;
56}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..5c6b7fca4edd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22
23struct mdp4_irq_wait {
24 struct mdp4_irq irq;
25 int count;
26};
27
28static DECLARE_WAIT_QUEUE_HEAD(wait_event);
29
30static DEFINE_SPINLOCK(list_lock);
31
32static void update_irq(struct mdp4_kms *mdp4_kms)
33{
34 struct mdp4_irq *irq;
35 uint32_t irqmask = mdp4_kms->vblank_mask;
36
37 BUG_ON(!spin_is_locked(&list_lock));
38
39 list_for_each_entry(irq, &mdp4_kms->irq_list, node)
40 irqmask |= irq->irqmask;
41
42 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
43}
44
45static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
46{
47 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags);
49 update_irq(mdp4_kms);
50 spin_unlock_irqrestore(&list_lock, flags);
51}
52
53static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
54{
55 DRM_ERROR("errors: %08x\n", irqstatus);
56}
57
58void mdp4_irq_preinstall(struct msm_kms *kms)
59{
60 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
61 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
62}
63
64int mdp4_irq_postinstall(struct msm_kms *kms)
65{
66 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
67 struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
68
69 INIT_LIST_HEAD(&mdp4_kms->irq_list);
70
71 error_handler->irq = mdp4_irq_error_handler;
72 error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
73 MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
74
75 mdp4_irq_register(mdp4_kms, error_handler);
76
77 return 0;
78}
79
80void mdp4_irq_uninstall(struct msm_kms *kms)
81{
82 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
83 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
84}
85
86irqreturn_t mdp4_irq(struct msm_kms *kms)
87{
88 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
89 struct drm_device *dev = mdp4_kms->dev;
90 struct msm_drm_private *priv = dev->dev_private;
91 struct mdp4_irq *handler, *n;
92 unsigned long flags;
93 unsigned int id;
94 uint32_t status;
95
96 status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
97 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
98
99 VERB("status=%08x", status);
100
101 for (id = 0; id < priv->num_crtcs; id++)
102 if (status & mdp4_crtc_vblank(priv->crtcs[id]))
103 drm_handle_vblank(dev, id);
104
105 spin_lock_irqsave(&list_lock, flags);
106 mdp4_kms->in_irq = true;
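 /* Drop the lock around each handler call: a handler may register or
  * unregister irqs itself (the crtc vblank handler unregisters, for
  * example), hence the _safe iteration and the in_irq flag, which
  * defers the INTR_ENABLE update until the walk below completes.
  */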
107 list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
108 if (handler->irqmask & status) {
109 spin_unlock_irqrestore(&list_lock, flags);
110 handler->irq(handler, handler->irqmask & status);
111 spin_lock_irqsave(&list_lock, flags);
112 }
113 }
114 mdp4_kms->in_irq = false;
115 update_irq(mdp4_kms);
116 spin_unlock_irqrestore(&list_lock, flags);
117
118 return IRQ_HANDLED;
119}
120
121int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
122{
123 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
124 unsigned long flags;
125
126 spin_lock_irqsave(&list_lock, flags);
127 mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
128 update_irq(mdp4_kms);
129 spin_unlock_irqrestore(&list_lock, flags);
130
131 return 0;
132}
133
134void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135{
136 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
137 unsigned long flags;
138
139 spin_lock_irqsave(&list_lock, flags);
140 mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
141 update_irq(mdp4_kms);
142 spin_unlock_irqrestore(&list_lock, flags);
143}
144
145static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
146{
147 struct mdp4_irq_wait *wait =
148 container_of(irq, struct mdp4_irq_wait, irq);
149 wait->count--;
150 wake_up_all(&wait_event);
151}
152
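/* Block until every irq in 'irqmask' has fired at least once.  Used, for
 * example, by the DTV encoder to wait for MDP4_IRQ_EXTERNAL_VSYNC so the
 * DTV ENABLE=0 write is known to have latched before its clocks are cut.
 */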
153void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
154{
155 struct mdp4_irq_wait wait = {
156 .irq = {
157 .irq = wait_irq,
158 .irqmask = irqmask,
159 },
160 .count = 1,
161 };
162 mdp4_irq_register(mdp4_kms, &wait.irq);
163 wait_event(wait_event, (wait.count <= 0));
164 mdp4_irq_unregister(mdp4_kms, &wait.irq);
165}
166
167void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
168{
169 unsigned long flags;
170 bool needs_update = false;
171
172 spin_lock_irqsave(&list_lock, flags);
173
174 if (!irq->registered) {
175 irq->registered = true;
176 list_add(&irq->node, &mdp4_kms->irq_list);
177 needs_update = !mdp4_kms->in_irq;
178 }
179
180 spin_unlock_irqrestore(&list_lock, flags);
181
182 if (needs_update)
183 update_irq_unlocked(mdp4_kms);
184}
185
186void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
187{
188 unsigned long flags;
189 bool needs_update = false;
190
191 spin_lock_irqsave(&list_lock, flags);
192
193 if (irq->registered) {
194 irq->registered = false;
195 list_del(&irq->node);
196 needs_update = !mdp4_kms->in_irq;
197 }
198
199 spin_unlock_irqrestore(&list_lock, flags);
200
201 if (needs_update)
202 update_irq_unlocked(mdp4_kms);
203}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 000000000000..960cd894da78
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "mdp4_kms.h"
21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25
26static int mdp4_hw_init(struct msm_kms *kms)
27{
28 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
29 struct drm_device *dev = mdp4_kms->dev;
30 uint32_t version, major, minor, dmap_cfg, vg_cfg;
31 unsigned long clk;
32 int ret = 0;
33
34 pm_runtime_get_sync(dev->dev);
35
36 version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
37
38 major = FIELD(version, MDP4_VERSION_MAJOR);
39 minor = FIELD(version, MDP4_VERSION_MINOR);
40
41 DBG("found MDP version v%d.%d", major, minor);
42
43 if (major != 4) {
44 dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
45 major, minor);
46 ret = -ENXIO;
47 goto out;
48 }
49
50 mdp4_kms->rev = minor;
51
52 if (mdp4_kms->dsi_pll_vdda) {
53 if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
54 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
55 1200000, 1200000);
56 if (ret) {
57 dev_err(dev->dev,
58 "failed to set dsi_pll_vdda voltage: %d\n", ret);
59 goto out;
60 }
61 }
62 }
63
64 if (mdp4_kms->dsi_pll_vddio) {
65 if (mdp4_kms->rev == 2) {
66 ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
67 1800000, 1800000);
68 if (ret) {
69 dev_err(dev->dev,
70 "failed to set dsi_pll_vddio voltage: %d\n", ret);
71 goto out;
72 }
73 }
74 }
75
76 if (mdp4_kms->rev > 1) {
77 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
78 mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
79 }
80
81 mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
82
83 /* max read pending cmd config, 3 pending requests: */
84 mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
85
86 clk = clk_get_rate(mdp4_kms->clk);
87
88 if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
89 dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
 90 vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
91 } else {
92 dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
93 vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
94 }
95
96 DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
97
98 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
99 mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
100
101 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
102 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
103 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
104 mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
105
106 if (mdp4_kms->rev >= 2)
107 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
108
109 /* disable CSC matrix / YUV by default: */
110 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
112 mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
113 mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
114 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
115 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
116
117 if (mdp4_kms->rev > 1)
118 mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
119
120out:
121 pm_runtime_put_sync(dev->dev);
122
123 return ret;
124}
125
126static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
127 struct drm_encoder *encoder)
128{
129 /* if we had >1 encoder, we'd need something more clever: */
130 return mdp4_dtv_round_pixclk(encoder, rate);
131}
132
133static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
134{
135 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
136 struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
137 unsigned i;
138
139 for (i = 0; i < priv->num_crtcs; i++)
140 mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
141}
142
143static void mdp4_destroy(struct msm_kms *kms)
144{
145 struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
146 kfree(mdp4_kms);
147}
148
149static const struct msm_kms_funcs kms_funcs = {
150 .hw_init = mdp4_hw_init,
151 .irq_preinstall = mdp4_irq_preinstall,
152 .irq_postinstall = mdp4_irq_postinstall,
153 .irq_uninstall = mdp4_irq_uninstall,
154 .irq = mdp4_irq,
155 .enable_vblank = mdp4_enable_vblank,
156 .disable_vblank = mdp4_disable_vblank,
157 .get_format = mdp4_get_format,
158 .round_pixclk = mdp4_round_pixclk,
159 .preclose = mdp4_preclose,
160 .destroy = mdp4_destroy,
161};
162
163int mdp4_disable(struct mdp4_kms *mdp4_kms)
164{
165 DBG("");
166
167 clk_disable_unprepare(mdp4_kms->clk);
168 if (mdp4_kms->pclk)
169 clk_disable_unprepare(mdp4_kms->pclk);
170 clk_disable_unprepare(mdp4_kms->lut_clk);
171
172 return 0;
173}
174
175int mdp4_enable(struct mdp4_kms *mdp4_kms)
176{
177 DBG("");
178
179 clk_prepare_enable(mdp4_kms->clk);
180 if (mdp4_kms->pclk)
181 clk_prepare_enable(mdp4_kms->pclk);
182 clk_prepare_enable(mdp4_kms->lut_clk);
183
184 return 0;
185}
186
187static int modeset_init(struct mdp4_kms *mdp4_kms)
188{
189 struct drm_device *dev = mdp4_kms->dev;
190 struct msm_drm_private *priv = dev->dev_private;
191 struct drm_plane *plane;
192 struct drm_crtc *crtc;
193 struct drm_encoder *encoder;
194 struct drm_connector *connector;
195 int ret;
196
197 /*
198 * NOTE: this is a bit simplistic until we add support
199 * for more than just RGB1->DMA_E->DTV->HDMI
200 */
201
202 /* the CRTCs get constructed with a private plane: */
203 plane = mdp4_plane_init(dev, RGB1, true);
204 if (IS_ERR(plane)) {
205 dev_err(dev->dev, "failed to construct plane for RGB1\n");
206 ret = PTR_ERR(plane);
207 goto fail;
208 }
209
210 crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
211 if (IS_ERR(crtc)) {
212 dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
213 ret = PTR_ERR(crtc);
214 goto fail;
215 }
216 priv->crtcs[priv->num_crtcs++] = crtc;
217
218 encoder = mdp4_dtv_encoder_init(dev);
219 if (IS_ERR(encoder)) {
220 dev_err(dev->dev, "failed to construct DTV encoder\n");
221 ret = PTR_ERR(encoder);
222 goto fail;
223 }
224 encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
225 priv->encoders[priv->num_encoders++] = encoder;
226
227 connector = hdmi_connector_init(dev, encoder);
228 if (IS_ERR(connector)) {
229 dev_err(dev->dev, "failed to construct HDMI connector\n");
230 ret = PTR_ERR(connector);
231 goto fail;
232 }
233 priv->connectors[priv->num_connectors++] = connector;
234
235 return 0;
236
237fail:
238 return ret;
239}
240
241static const char *iommu_ports[] = {
242 "mdp_port0_cb0", "mdp_port1_cb0",
243};
244
245struct msm_kms *mdp4_kms_init(struct drm_device *dev)
246{
247 struct platform_device *pdev = dev->platformdev;
248 struct mdp4_platform_config *config = mdp4_get_config(pdev);
249 struct mdp4_kms *mdp4_kms;
250 struct msm_kms *kms = NULL;
251 int ret;
252
253 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
254 if (!mdp4_kms) {
255 dev_err(dev->dev, "failed to allocate kms\n");
256 ret = -ENOMEM;
257 goto fail;
258 }
259
260 kms = &mdp4_kms->base;
261 kms->funcs = &kms_funcs;
262
263 mdp4_kms->dev = dev;
264
265 mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
266 if (IS_ERR(mdp4_kms->mmio)) {
267 ret = PTR_ERR(mdp4_kms->mmio);
268 goto fail;
269 }
270
271 mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
272 if (IS_ERR(mdp4_kms->dsi_pll_vdda))
273 mdp4_kms->dsi_pll_vdda = NULL;
274
275 mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
276 if (IS_ERR(mdp4_kms->dsi_pll_vddio))
277 mdp4_kms->dsi_pll_vddio = NULL;
278
279 mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
280 if (IS_ERR(mdp4_kms->vdd))
281 mdp4_kms->vdd = NULL;
282
283 if (mdp4_kms->vdd) {
284 ret = regulator_enable(mdp4_kms->vdd);
285 if (ret) {
286 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
287 goto fail;
288 }
289 }
290
291 mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
292 if (IS_ERR(mdp4_kms->clk)) {
293 dev_err(dev->dev, "failed to get core_clk\n");
294 ret = PTR_ERR(mdp4_kms->clk);
295 goto fail;
296 }
297
298 mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
299 if (IS_ERR(mdp4_kms->pclk))
300 mdp4_kms->pclk = NULL;
301
302 // XXX if (rev >= MDP_REV_42) { ???
303 mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
304 if (IS_ERR(mdp4_kms->lut_clk)) {
305 dev_err(dev->dev, "failed to get lut_clk\n");
306 ret = PTR_ERR(mdp4_kms->lut_clk);
307 goto fail;
308 }
309
310 clk_set_rate(mdp4_kms->clk, config->max_clk);
311 clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
312
313 if (!config->iommu) {
314 dev_err(dev->dev, "no iommu\n");
315 ret = -ENXIO;
316 goto fail;
317 }
318
319 /* make sure things are off before attaching iommu (bootloader could
320 * have left things on, in which case we'll start getting faults if
321 * we don't disable):
322 */
323 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
324 mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
325 mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
326 mdelay(16);
327
328 ret = msm_iommu_attach(dev, config->iommu,
329 iommu_ports, ARRAY_SIZE(iommu_ports));
330 if (ret)
331 goto fail;
332
333 mdp4_kms->id = msm_register_iommu(dev, config->iommu);
334 if (mdp4_kms->id < 0) {
335 ret = mdp4_kms->id;
336 dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
337 goto fail;
338 }
339
340 ret = modeset_init(mdp4_kms);
341 if (ret) {
342 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
343 goto fail;
344 }
345
346 return kms;
347
348fail:
349 if (kms)
350 mdp4_destroy(kms);
351 return ERR_PTR(ret);
352}
353
354static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
355{
356 static struct mdp4_platform_config config = {};
357#ifdef CONFIG_OF
358 /* TODO */
359#else
360 if (cpu_is_apq8064())
361 config.max_clk = 266667000;
362 else
363 config.max_clk = 200000000;
364
365 config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
366#endif
367 return &config;
368}
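
mdp4_enable()/mdp4_disable() simply ungate and gate the core, interface and LUT clocks; callers are expected to bracket any register access that could happen while the block would otherwise be unclocked. A rough sketch (editorial, not part of the patch), reusing the REG_MDP4_DTV_ENABLE register already written during init above:

	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_disable(mdp4_kms);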
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 000000000000..1e83554955f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MDP4_KMS_H__
19#define __MDP4_KMS_H__
20
21#include <linux/clk.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/consumer.h>
24
25#include "msm_drv.h"
26#include "mdp4.xml.h"
27
28
29/* For transiently registering for different MDP4 irqs that various parts
 30 * of the KMS code need during setup/configuration. Note that these are not
31 * necessarily the same as what drm_vblank_get/put() are requesting, and
32 * the hysteresis in drm_vblank_put() is not necessarily desirable for
33 * internal housekeeping related irq usage.
34 */
35struct mdp4_irq {
36 struct list_head node;
37 uint32_t irqmask;
38 bool registered;
39 void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
40};
41
42struct mdp4_kms {
43 struct msm_kms base;
44
45 struct drm_device *dev;
46
47 int rev;
48
49 /* mapper-id used to request GEM buffer mapped for scanout: */
50 int id;
51
52 void __iomem *mmio;
53
54 struct regulator *dsi_pll_vdda;
55 struct regulator *dsi_pll_vddio;
56 struct regulator *vdd;
57
58 struct clk *clk;
59 struct clk *pclk;
60 struct clk *lut_clk;
61
62 /* irq handling: */
63 bool in_irq;
64 struct list_head irq_list; /* list of mdp4_irq */
65 uint32_t vblank_mask; /* irq bits set for userspace vblank */
66 struct mdp4_irq error_handler;
67};
68#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
69
70/* platform config data (ie. from DT, or pdata) */
71struct mdp4_platform_config {
72 struct iommu_domain *iommu;
73 uint32_t max_clk;
74};
75
76struct mdp4_format {
77 struct msm_format base;
78 enum mpd4_bpc bpc_r, bpc_g, bpc_b;
79 enum mpd4_bpc_alpha bpc_a;
80 uint8_t unpack[4];
81 bool alpha_enable, unpack_tight;
82 uint8_t cpp, unpack_count;
83};
84#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
85
86static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
87{
88 msm_writel(data, mdp4_kms->mmio + reg);
89}
90
91static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
92{
93 return msm_readl(mdp4_kms->mmio + reg);
94}
95
96static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
97{
98 switch (pipe) {
99 case VG1: return MDP4_OVERLAY_FLUSH_VG1;
100 case VG2: return MDP4_OVERLAY_FLUSH_VG2;
101 case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
 102 case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
103 default: return 0;
104 }
105}
106
107static inline uint32_t ovlp2flush(int ovlp)
108{
109 switch (ovlp) {
110 case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
111 case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
112 default: return 0;
113 }
114}
115
116static inline uint32_t dma2irq(enum mdp4_dma dma)
117{
118 switch (dma) {
119 case DMA_P: return MDP4_IRQ_DMA_P_DONE;
120 case DMA_S: return MDP4_IRQ_DMA_S_DONE;
121 case DMA_E: return MDP4_IRQ_DMA_E_DONE;
122 default: return 0;
123 }
124}
125
126static inline uint32_t dma2err(enum mdp4_dma dma)
127{
128 switch (dma) {
129 case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
130 case DMA_S: return 0; // ???
131 case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
132 default: return 0;
133 }
134}
135
136int mdp4_disable(struct mdp4_kms *mdp4_kms);
137int mdp4_enable(struct mdp4_kms *mdp4_kms);
138
139void mdp4_irq_preinstall(struct msm_kms *kms);
140int mdp4_irq_postinstall(struct msm_kms *kms);
141void mdp4_irq_uninstall(struct msm_kms *kms);
142irqreturn_t mdp4_irq(struct msm_kms *kms);
143void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
144void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
145void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
146int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
147void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
148
149const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
150
151void mdp4_plane_install_properties(struct drm_plane *plane,
152 struct drm_mode_object *obj);
153void mdp4_plane_set_scanout(struct drm_plane *plane,
154 struct drm_framebuffer *fb);
155int mdp4_plane_mode_set(struct drm_plane *plane,
156 struct drm_crtc *crtc, struct drm_framebuffer *fb,
157 int crtc_x, int crtc_y,
158 unsigned int crtc_w, unsigned int crtc_h,
159 uint32_t src_x, uint32_t src_y,
160 uint32_t src_w, uint32_t src_h);
161enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
162struct drm_plane *mdp4_plane_init(struct drm_device *dev,
163 enum mpd4_pipe pipe_id, bool private_plane);
164
165uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
166void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
167void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
168void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
169struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
170 struct drm_plane *plane, int id, int ovlp_id,
171 enum mdp4_dma dma_id);
172
173long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
174struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
175
176#ifdef CONFIG_MSM_BUS_SCALING
177static inline int match_dev_name(struct device *dev, void *data)
178{
179 return !strcmp(dev_name(dev), data);
180}
181/* bus scaling data is associated with extra pointless platform devices,
182 * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
183 * to find their pdata to make the bus-scaling stuff work.
184 */
185static inline void *mdp4_find_pdata(const char *devname)
186{
187 struct device *dev;
188 dev = bus_find_device(&platform_bus_type, NULL,
189 (void *)devname, match_dev_name);
190 return dev ? dev->platform_data : NULL;
191}
192#endif
193
194#endif /* __MDP4_KMS_H__ */
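
The pipe2flush()/ovlp2flush() helpers above build the bitmask telling the hardware which double-buffered pipe/overlay registers to latch. A hypothetical sketch of how the CRTC code is expected to use them (editorial, not part of the patch; REG_MDP4_OVERLAY_FLUSH is assumed from the generated mdp4.xml.h):

static void example_flush_rgb1(struct mdp4_kms *mdp4_kms)
{
	uint32_t flush = pipe2flush(RGB1) | ovlp2flush(0);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}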
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 000000000000..3468229d58b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "mdp4_kms.h"
19
20
21struct mdp4_plane {
22 struct drm_plane base;
23 const char *name;
24
25 enum mpd4_pipe pipe;
26
27 uint32_t nformats;
28 uint32_t formats[32];
29
30 bool enabled;
31};
32#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
33
34static struct mdp4_kms *get_kms(struct drm_plane *plane)
35{
36 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp4_kms(priv->kms);
38}
39
40static int mdp4_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{
47 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
48
49 mdp4_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp4_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59}
60
61static int mdp4_plane_disable(struct drm_plane *plane)
62{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
64 DBG("%s: TODO", mdp4_plane->name); // XXX
65 return 0;
66}
67
68static void mdp4_plane_destroy(struct drm_plane *plane)
69{
70 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
71
72 mdp4_plane_disable(plane);
73 drm_plane_cleanup(plane);
74
75 kfree(mdp4_plane);
76}
77
78/* helper to install properties which are common to planes and crtcs */
79void mdp4_plane_install_properties(struct drm_plane *plane,
80 struct drm_mode_object *obj)
81{
82 // XXX
83}
84
85int mdp4_plane_set_property(struct drm_plane *plane,
86 struct drm_property *property, uint64_t val)
87{
88 // XXX
89 return -EINVAL;
90}
91
92static const struct drm_plane_funcs mdp4_plane_funcs = {
93 .update_plane = mdp4_plane_update,
94 .disable_plane = mdp4_plane_disable,
95 .destroy = mdp4_plane_destroy,
96 .set_property = mdp4_plane_set_property,
97};
98
99void mdp4_plane_set_scanout(struct drm_plane *plane,
100 struct drm_framebuffer *fb)
101{
102 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
103 struct mdp4_kms *mdp4_kms = get_kms(plane);
104 enum mpd4_pipe pipe = mdp4_plane->pipe;
105 uint32_t iova;
106
107 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
108 MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
109 MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
110
111 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
112 MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
113 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
114
115 msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
116 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
117
118 plane->fb = fb;
119}
120
121#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
122
123int mdp4_plane_mode_set(struct drm_plane *plane,
124 struct drm_crtc *crtc, struct drm_framebuffer *fb,
125 int crtc_x, int crtc_y,
126 unsigned int crtc_w, unsigned int crtc_h,
127 uint32_t src_x, uint32_t src_y,
128 uint32_t src_w, uint32_t src_h)
129{
130 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
131 struct mdp4_kms *mdp4_kms = get_kms(plane);
132 enum mpd4_pipe pipe = mdp4_plane->pipe;
133 const struct mdp4_format *format;
134 uint32_t op_mode = 0;
135 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
136 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
137
138 /* src values are in Q16 fixed point, convert to integer: */
139 src_x = src_x >> 16;
140 src_y = src_y >> 16;
141 src_w = src_w >> 16;
142 src_h = src_h >> 16;
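	/*
	 * (Editorial note: e.g. a full 1920x1080 source rect arrives from
	 * userspace as src_w = 1920 << 16, src_h = 1080 << 16; the shifts
	 * above recover whole pixels.)
	 */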
143
144 if (src_w != crtc_w) {
145 op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
146 /* TODO calc phasex_step */
147 }
148
149 if (src_h != crtc_h) {
150 op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
151 /* TODO calc phasey_step */
152 }
153
154 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
155 MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
156 MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
157
158 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
159 MDP4_PIPE_SRC_XY_X(src_x) |
160 MDP4_PIPE_SRC_XY_Y(src_y));
161
162 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
163 MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
164 MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
165
166 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
 167 MDP4_PIPE_DST_XY_X(crtc_x) |
 168 MDP4_PIPE_DST_XY_Y(crtc_y));
169
170 mdp4_plane_set_scanout(plane, fb);
171
172 format = to_mdp4_format(msm_framebuffer_format(fb));
173
174 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
175 MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
176 MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
177 MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
178 MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
179 COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
180 MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
181 MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
182 COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
183
184 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
185 MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
186 MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
187 MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
188 MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
189
190 mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
191 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
192 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
193
194 plane->crtc = crtc;
195
196 return 0;
197}
198
199static const char *pipe_names[] = {
200 "VG1", "VG2",
201 "RGB1", "RGB2", "RGB3",
202 "VG3", "VG4",
203};
204
205enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
206{
207 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
208 return mdp4_plane->pipe;
209}
210
211/* initialize plane */
212struct drm_plane *mdp4_plane_init(struct drm_device *dev,
213 enum mpd4_pipe pipe_id, bool private_plane)
214{
215 struct msm_drm_private *priv = dev->dev_private;
216 struct drm_plane *plane = NULL;
217 struct mdp4_plane *mdp4_plane;
218 int ret;
219
220 mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
221 if (!mdp4_plane) {
222 ret = -ENOMEM;
223 goto fail;
224 }
225
226 plane = &mdp4_plane->base;
227
228 mdp4_plane->pipe = pipe_id;
229 mdp4_plane->name = pipe_names[pipe_id];
230
231 drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
232 mdp4_plane->formats, mdp4_plane->nformats, private_plane);
233
234 mdp4_plane_install_properties(plane, &plane->base);
235
236 return plane;
237
238fail:
239 if (plane)
240 mdp4_plane_destroy(plane);
241
242 return ERR_PTR(ret);
243}
diff --git a/drivers/gpu/drm/msm/msm_connector.c b/drivers/gpu/drm/msm/msm_connector.c
new file mode 100644
index 000000000000..aeea8879e36f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_connector.h"
20
21void msm_connector_init(struct msm_connector *connector,
22 const struct msm_connector_funcs *funcs,
23 struct drm_encoder *encoder)
24{
25 connector->funcs = funcs;
26 connector->encoder = encoder;
27}
28
29struct drm_encoder *msm_connector_attached_encoder(
30 struct drm_connector *connector)
31{
32 struct msm_connector *msm_connector = to_msm_connector(connector);
33 return msm_connector->encoder;
34}
diff --git a/drivers/gpu/drm/msm/msm_connector.h b/drivers/gpu/drm/msm/msm_connector.h
new file mode 100644
index 000000000000..0b41866adc08
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_CONNECTOR_H__
19#define __MSM_CONNECTOR_H__
20
21#include "msm_drv.h"
22
23/*
 24 * Base class for MSM connectors. Typically a connector is fairly
 25 * passive. But with the split between (for example) the DTV block
 26 * within MDP4 and the HDMI encoder, we really need two parts to an
 27 * encoder. So the part external to the display controller block lives
 28 * in the connector, and the encoder calls into it to delegate the
 29 * appropriate parts of modeset.
30 */
31
32struct msm_connector;
33
34struct msm_connector_funcs {
35 void (*dpms)(struct msm_connector *connector, int mode);
36 void (*mode_set)(struct msm_connector *connector,
37 struct drm_display_mode *mode);
38};
39
40struct msm_connector {
41 struct drm_connector base;
42 struct drm_encoder *encoder;
43 const struct msm_connector_funcs *funcs;
44};
45#define to_msm_connector(x) container_of(x, struct msm_connector, base)
46
47void msm_connector_init(struct msm_connector *connector,
48 const struct msm_connector_funcs *funcs,
49 struct drm_encoder *encoder);
50
51struct drm_encoder *msm_connector_attached_encoder(
52 struct drm_connector *connector);
53
54static inline struct msm_connector *get_connector(struct drm_encoder *encoder)
55{
56 struct msm_drm_private *priv = encoder->dev->dev_private;
57 int i;
58
59 for (i = 0; i < priv->num_connectors; i++) {
60 struct drm_connector *connector = priv->connectors[i];
61 if (msm_connector_attached_encoder(connector) == encoder)
62 return to_msm_connector(connector);
63 }
64
65 return NULL;
66}
67
68#endif /* __MSM_CONNECTOR_H__ */
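
A hypothetical sketch (editorial, not part of the patch) of how an encoder implementation such as the MDP4 DTV encoder is expected to delegate through this interface, using the get_connector() helper above:

static void example_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct msm_connector *msm_connector = get_connector(encoder);

	if (msm_connector)
		msm_connector->funcs->dpms(msm_connector, mode);
}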
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 000000000000..864c9773636b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev)
24{
25 struct msm_drm_private *priv = dev->dev_private;
26 if (priv->fbdev)
27 drm_fb_helper_hotplug_event(priv->fbdev);
28}
29
30static const struct drm_mode_config_funcs mode_config_funcs = {
31 .fb_create = msm_framebuffer_create,
32 .output_poll_changed = msm_fb_output_poll_changed,
33};
34
35static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
36 unsigned long iova, int flags, void *arg)
37{
38 DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
39 return 0;
40}
41
42int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
43{
44 struct msm_drm_private *priv = dev->dev_private;
45 int idx = priv->num_iommus++;
46
47 if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
48 return -EINVAL;
49
50 priv->iommus[idx] = iommu;
51
52 iommu_set_fault_handler(iommu, msm_fault_handler, dev);
53
54 /* need to iommu_attach_device() somewhere?? on resume?? */
55
56 return idx;
57}
58
59int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
60 const char **names, int cnt)
61{
62 int i, ret;
63
64 for (i = 0; i < cnt; i++) {
65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx)
67 continue;
68 ret = iommu_attach_device(iommu, ctx);
69 if (ret) {
 70 dev_warn(dev->dev, "could not attach iommu to %s\n", names[i]);
71 return ret;
72 }
73 }
74 return 0;
75}
76
77#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
78static bool reglog = false;
79MODULE_PARM_DESC(reglog, "Enable register read/write logging");
80module_param(reglog, bool, 0600);
81#else
82#define reglog 0
83#endif
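/* Editorial note: with CONFIG_DRM_MSM_REGISTER_LOGGING enabled, the
 * parameter can be set at load time (e.g. "msm.reglog=1" on the kernel
 * command line, assuming the module ends up named msm.ko) or flipped at
 * runtime via /sys/module/msm/parameters/reglog, since it is registered
 * with mode 0600.
 */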
84
85void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
86 const char *dbgname)
87{
88 struct resource *res;
89 unsigned long size;
90 void __iomem *ptr;
91
92 if (name)
93 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
94 else
95 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
96
97 if (!res) {
98 dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
99 return ERR_PTR(-EINVAL);
100 }
101
102 size = resource_size(res);
103
104 ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
105 if (!ptr) {
106 dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
107 return ERR_PTR(-ENOMEM);
108 }
109
110 if (reglog)
111 printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
112
113 return ptr;
114}
115
116void msm_writel(u32 data, void __iomem *addr)
117{
118 if (reglog)
119 printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
120 writel(data, addr);
121}
122
123u32 msm_readl(const void __iomem *addr)
124{
125 u32 val = readl(addr);
126 if (reglog)
127 printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
128 return val;
129}
130
131/*
132 * DRM operations:
133 */
134
135static int msm_unload(struct drm_device *dev)
136{
137 struct msm_drm_private *priv = dev->dev_private;
138 struct msm_kms *kms = priv->kms;
139 struct msm_gpu *gpu = priv->gpu;
140
141 drm_kms_helper_poll_fini(dev);
142 drm_mode_config_cleanup(dev);
143 drm_vblank_cleanup(dev);
144
145 pm_runtime_get_sync(dev->dev);
146 drm_irq_uninstall(dev);
147 pm_runtime_put_sync(dev->dev);
148
149 flush_workqueue(priv->wq);
150 destroy_workqueue(priv->wq);
151
152 if (kms) {
153 pm_runtime_disable(dev->dev);
154 kms->funcs->destroy(kms);
155 }
156
157 if (gpu) {
158 mutex_lock(&dev->struct_mutex);
159 gpu->funcs->pm_suspend(gpu);
160 gpu->funcs->destroy(gpu);
161 mutex_unlock(&dev->struct_mutex);
162 }
163
164 dev->dev_private = NULL;
165
166 kfree(priv);
167
168 return 0;
169}
170
171static int msm_load(struct drm_device *dev, unsigned long flags)
172{
173 struct platform_device *pdev = dev->platformdev;
174 struct msm_drm_private *priv;
175 struct msm_kms *kms;
176 int ret;
177
178 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) {
180 dev_err(dev->dev, "failed to allocate private data\n");
181 return -ENOMEM;
182 }
183
184 dev->dev_private = priv;
185
186 priv->wq = alloc_ordered_workqueue("msm", 0);
187 init_waitqueue_head(&priv->fence_event);
188
189 INIT_LIST_HEAD(&priv->inactive_list);
190
191 drm_mode_config_init(dev);
192
193 kms = mdp4_kms_init(dev);
194 if (IS_ERR(kms)) {
195 /*
196 * NOTE: once we have GPU support, having no kms should not
197 * be considered fatal.. ideally we would still support gpu
198 * and (for example) use dmabuf/prime to share buffers with
199 * imx drm driver on iMX5
200 */
201 dev_err(dev->dev, "failed to load kms\n");
 202 ret = PTR_ERR(kms);
203 goto fail;
204 }
205
206 priv->kms = kms;
207
208 if (kms) {
209 pm_runtime_enable(dev->dev);
210 ret = kms->funcs->hw_init(kms);
211 if (ret) {
212 dev_err(dev->dev, "kms hw init failed: %d\n", ret);
213 goto fail;
214 }
215 }
216
217 dev->mode_config.min_width = 0;
218 dev->mode_config.min_height = 0;
219 dev->mode_config.max_width = 2048;
220 dev->mode_config.max_height = 2048;
221 dev->mode_config.funcs = &mode_config_funcs;
222
223 ret = drm_vblank_init(dev, 1);
224 if (ret < 0) {
225 dev_err(dev->dev, "failed to initialize vblank\n");
226 goto fail;
227 }
228
229 pm_runtime_get_sync(dev->dev);
230 ret = drm_irq_install(dev);
231 pm_runtime_put_sync(dev->dev);
232 if (ret < 0) {
233 dev_err(dev->dev, "failed to install IRQ handler\n");
234 goto fail;
235 }
236
237 platform_set_drvdata(pdev, dev);
238
239#ifdef CONFIG_DRM_MSM_FBDEV
240 priv->fbdev = msm_fbdev_init(dev);
241#endif
242
243 drm_kms_helper_poll_init(dev);
244
245 return 0;
246
247fail:
248 msm_unload(dev);
249 return ret;
250}
251
252static void load_gpu(struct drm_device *dev)
253{
254 struct msm_drm_private *priv = dev->dev_private;
255 struct msm_gpu *gpu;
256
257 if (priv->gpu)
258 return;
259
260 mutex_lock(&dev->struct_mutex);
261 gpu = a3xx_gpu_init(dev);
262 if (IS_ERR(gpu)) {
263 dev_warn(dev->dev, "failed to load a3xx gpu\n");
264 gpu = NULL;
265 /* not fatal */
266 }
267 mutex_unlock(&dev->struct_mutex);
268
269 if (gpu) {
270 int ret;
271 gpu->funcs->pm_resume(gpu);
272 ret = gpu->funcs->hw_init(gpu);
273 if (ret) {
274 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
275 gpu->funcs->destroy(gpu);
276 gpu = NULL;
277 }
278 }
279
280 priv->gpu = gpu;
281}
282
283static int msm_open(struct drm_device *dev, struct drm_file *file)
284{
285 struct msm_file_private *ctx;
286
287 /* For now, load gpu on open.. to avoid the requirement of having
288 * firmware in the initrd.
289 */
290 load_gpu(dev);
291
292 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
293 if (!ctx)
294 return -ENOMEM;
295
296 file->driver_priv = ctx;
297
298 return 0;
299}
300
301static void msm_preclose(struct drm_device *dev, struct drm_file *file)
302{
303 struct msm_drm_private *priv = dev->dev_private;
304 struct msm_file_private *ctx = file->driver_priv;
305 struct msm_kms *kms = priv->kms;
306
307 if (kms)
308 kms->funcs->preclose(kms, file);
309
310 mutex_lock(&dev->struct_mutex);
311 if (ctx == priv->lastctx)
312 priv->lastctx = NULL;
313 mutex_unlock(&dev->struct_mutex);
314
315 kfree(ctx);
316}
317
318static void msm_lastclose(struct drm_device *dev)
319{
320 struct msm_drm_private *priv = dev->dev_private;
321 if (priv->fbdev) {
322 drm_modeset_lock_all(dev);
323 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
324 drm_modeset_unlock_all(dev);
325 }
326}
327
328static irqreturn_t msm_irq(DRM_IRQ_ARGS)
329{
330 struct drm_device *dev = arg;
331 struct msm_drm_private *priv = dev->dev_private;
332 struct msm_kms *kms = priv->kms;
333 BUG_ON(!kms);
334 return kms->funcs->irq(kms);
335}
336
337static void msm_irq_preinstall(struct drm_device *dev)
338{
339 struct msm_drm_private *priv = dev->dev_private;
340 struct msm_kms *kms = priv->kms;
341 BUG_ON(!kms);
342 kms->funcs->irq_preinstall(kms);
343}
344
345static int msm_irq_postinstall(struct drm_device *dev)
346{
347 struct msm_drm_private *priv = dev->dev_private;
348 struct msm_kms *kms = priv->kms;
349 BUG_ON(!kms);
350 return kms->funcs->irq_postinstall(kms);
351}
352
353static void msm_irq_uninstall(struct drm_device *dev)
354{
355 struct msm_drm_private *priv = dev->dev_private;
356 struct msm_kms *kms = priv->kms;
357 BUG_ON(!kms);
358 kms->funcs->irq_uninstall(kms);
359}
360
361static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
362{
363 struct msm_drm_private *priv = dev->dev_private;
364 struct msm_kms *kms = priv->kms;
365 if (!kms)
366 return -ENXIO;
367 DBG("dev=%p, crtc=%d", dev, crtc_id);
368 return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
369}
370
371static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
372{
373 struct msm_drm_private *priv = dev->dev_private;
374 struct msm_kms *kms = priv->kms;
375 if (!kms)
376 return;
377 DBG("dev=%p, crtc=%d", dev, crtc_id);
378 kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
379}
380
381/*
382 * DRM debugfs:
383 */
384
385#ifdef CONFIG_DEBUG_FS
386static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
387{
388 struct msm_drm_private *priv = dev->dev_private;
389 struct msm_gpu *gpu = priv->gpu;
390
391 if (gpu) {
392 seq_printf(m, "%s Status:\n", gpu->name);
393 gpu->funcs->show(gpu, m);
394 }
395
396 return 0;
397}
398
399static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
400{
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gpu *gpu = priv->gpu;
403
404 if (gpu) {
405 seq_printf(m, "Active Objects (%s):\n", gpu->name);
406 msm_gem_describe_objects(&gpu->active_list, m);
407 }
408
409 seq_printf(m, "Inactive Objects:\n");
410 msm_gem_describe_objects(&priv->inactive_list, m);
411
412 return 0;
413}
414
415static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
416{
417 return drm_mm_dump_table(m, dev->mm_private);
418}
419
420static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
421{
422 struct msm_drm_private *priv = dev->dev_private;
423 struct drm_framebuffer *fb, *fbdev_fb = NULL;
424
425 if (priv->fbdev) {
426 seq_printf(m, "fbcon ");
427 fbdev_fb = priv->fbdev->fb;
428 msm_framebuffer_describe(fbdev_fb, m);
429 }
430
431 mutex_lock(&dev->mode_config.fb_lock);
432 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
433 if (fb == fbdev_fb)
434 continue;
435
436 seq_printf(m, "user ");
437 msm_framebuffer_describe(fb, m);
438 }
439 mutex_unlock(&dev->mode_config.fb_lock);
440
441 return 0;
442}
443
444static int show_locked(struct seq_file *m, void *arg)
445{
446 struct drm_info_node *node = (struct drm_info_node *) m->private;
447 struct drm_device *dev = node->minor->dev;
448 int (*show)(struct drm_device *dev, struct seq_file *m) =
449 node->info_ent->data;
450 int ret;
451
452 ret = mutex_lock_interruptible(&dev->struct_mutex);
453 if (ret)
454 return ret;
455
456 ret = show(dev, m);
457
458 mutex_unlock(&dev->struct_mutex);
459
460 return ret;
461}
462
463static struct drm_info_list msm_debugfs_list[] = {
464 {"gpu", show_locked, 0, msm_gpu_show},
465 {"gem", show_locked, 0, msm_gem_show},
466 { "mm", show_locked, 0, msm_mm_show },
467 { "fb", show_locked, 0, msm_fb_show },
468};
469
470static int msm_debugfs_init(struct drm_minor *minor)
471{
472 struct drm_device *dev = minor->dev;
473 int ret;
474
475 ret = drm_debugfs_create_files(msm_debugfs_list,
476 ARRAY_SIZE(msm_debugfs_list),
477 minor->debugfs_root, minor);
478
479 if (ret) {
480 dev_err(dev->dev, "could not install msm_debugfs_list\n");
481 return ret;
482 }
483
484 return ret;
485}
486
487static void msm_debugfs_cleanup(struct drm_minor *minor)
488{
489 drm_debugfs_remove_files(msm_debugfs_list,
490 ARRAY_SIZE(msm_debugfs_list), minor);
491}
492#endif
493
494/*
495 * Fences:
496 */
497
498int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
499 struct timespec *timeout)
500{
501 struct msm_drm_private *priv = dev->dev_private;
502 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
503 unsigned long start_jiffies = jiffies;
504 unsigned long remaining_jiffies;
505 int ret;
506
507 if (time_after(start_jiffies, timeout_jiffies))
508 remaining_jiffies = 0;
509 else
510 remaining_jiffies = timeout_jiffies - start_jiffies;
511
512 ret = wait_event_interruptible_timeout(priv->fence_event,
513 priv->completed_fence >= fence,
514 remaining_jiffies);
515 if (ret == 0) {
516 DBG("timeout waiting for fence: %u (completed: %u)",
517 fence, priv->completed_fence);
518 ret = -ETIMEDOUT;
519 } else if (ret != -ERESTARTSYS) {
520 ret = 0;
521 }
522
523 return ret;
524}
525
526/* call under struct_mutex */
527void msm_update_fence(struct drm_device *dev, uint32_t fence)
528{
529 struct msm_drm_private *priv = dev->dev_private;
530
531 if (fence > priv->completed_fence) {
532 priv->completed_fence = fence;
533 wake_up_all(&priv->fence_event);
534 }
535}
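/* Editorial sketch (not part of the patch): the producer side pairs with
 * msm_wait_fence_interruptable() by publishing retired fences under
 * struct_mutex, roughly as follows (the real retire path lives in
 * msm_gpu.c; "retired_fence" is an illustrative name):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	msm_update_fence(dev, retired_fence);
 *	mutex_unlock(&dev->struct_mutex);
 */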
536
537/*
538 * DRM ioctls:
539 */
540
541static int msm_ioctl_get_param(struct drm_device *dev, void *data,
542 struct drm_file *file)
543{
544 struct msm_drm_private *priv = dev->dev_private;
545 struct drm_msm_param *args = data;
546 struct msm_gpu *gpu;
547
548 /* for now, we just have 3d pipe.. eventually this would need to
549 * be more clever to dispatch to appropriate gpu module:
550 */
551 if (args->pipe != MSM_PIPE_3D0)
552 return -EINVAL;
553
554 gpu = priv->gpu;
555
556 if (!gpu)
557 return -ENXIO;
558
559 return gpu->funcs->get_param(gpu, args->param, &args->value);
560}
561
562static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
563 struct drm_file *file)
564{
565 struct drm_msm_gem_new *args = data;
566 return msm_gem_new_handle(dev, file, args->size,
567 args->flags, &args->handle);
568}
569
570#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
571
572static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
573 struct drm_file *file)
574{
575 struct drm_msm_gem_cpu_prep *args = data;
576 struct drm_gem_object *obj;
577 int ret;
578
579 obj = drm_gem_object_lookup(dev, file, args->handle);
580 if (!obj)
581 return -ENOENT;
582
583 ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
584
585 drm_gem_object_unreference_unlocked(obj);
586
587 return ret;
588}
589
590static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
591 struct drm_file *file)
592{
593 struct drm_msm_gem_cpu_fini *args = data;
594 struct drm_gem_object *obj;
595 int ret;
596
597 obj = drm_gem_object_lookup(dev, file, args->handle);
598 if (!obj)
599 return -ENOENT;
600
601 ret = msm_gem_cpu_fini(obj);
602
603 drm_gem_object_unreference_unlocked(obj);
604
605 return ret;
606}
607
608static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
609 struct drm_file *file)
610{
611 struct drm_msm_gem_info *args = data;
612 struct drm_gem_object *obj;
613 int ret = 0;
614
615 if (args->pad)
616 return -EINVAL;
617
618 obj = drm_gem_object_lookup(dev, file, args->handle);
619 if (!obj)
620 return -ENOENT;
621
622 args->offset = msm_gem_mmap_offset(obj);
623
624 drm_gem_object_unreference_unlocked(obj);
625
626 return ret;
627}
628
629static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
630 struct drm_file *file)
631{
632 struct drm_msm_wait_fence *args = data;
633 return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
634}
635
636static const struct drm_ioctl_desc msm_ioctls[] = {
637 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
638 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
639 DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
640 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
641 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
642 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
643 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
644};
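/* Editorial sketch (not part of the patch): from userspace, assuming the
 * uapi header added in this series (include/uapi/drm/msm_drm.h) and
 * libdrm's drmCommandWriteRead(), allocating a buffer looks roughly like:
 *
 *	struct drm_msm_gem_new req = { .size = 0x1000, .flags = MSM_BO_WC };
 *
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req));
 *
 * where, on success, req.handle names the new GEM buffer object.
 */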
645
646static const struct vm_operations_struct vm_ops = {
647 .fault = msm_gem_fault,
648 .open = drm_gem_vm_open,
649 .close = drm_gem_vm_close,
650};
651
652static const struct file_operations fops = {
653 .owner = THIS_MODULE,
654 .open = drm_open,
655 .release = drm_release,
656 .unlocked_ioctl = drm_ioctl,
657#ifdef CONFIG_COMPAT
658 .compat_ioctl = drm_compat_ioctl,
659#endif
660 .poll = drm_poll,
661 .read = drm_read,
662 .llseek = no_llseek,
663 .mmap = msm_gem_mmap,
664};
665
666static struct drm_driver msm_driver = {
667 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
668 .load = msm_load,
669 .unload = msm_unload,
670 .open = msm_open,
671 .preclose = msm_preclose,
672 .lastclose = msm_lastclose,
673 .irq_handler = msm_irq,
674 .irq_preinstall = msm_irq_preinstall,
675 .irq_postinstall = msm_irq_postinstall,
676 .irq_uninstall = msm_irq_uninstall,
677 .get_vblank_counter = drm_vblank_count,
678 .enable_vblank = msm_enable_vblank,
679 .disable_vblank = msm_disable_vblank,
680 .gem_free_object = msm_gem_free_object,
681 .gem_vm_ops = &vm_ops,
682 .dumb_create = msm_gem_dumb_create,
683 .dumb_map_offset = msm_gem_dumb_map_offset,
684 .dumb_destroy = msm_gem_dumb_destroy,
685#ifdef CONFIG_DEBUG_FS
686 .debugfs_init = msm_debugfs_init,
687 .debugfs_cleanup = msm_debugfs_cleanup,
688#endif
689 .ioctls = msm_ioctls,
690 .num_ioctls = DRM_MSM_NUM_IOCTLS,
691 .fops = &fops,
692 .name = "msm",
693 .desc = "MSM Snapdragon DRM",
694 .date = "20130625",
695 .major = 1,
696 .minor = 0,
697};
698
699#ifdef CONFIG_PM_SLEEP
700static int msm_pm_suspend(struct device *dev)
701{
702 struct drm_device *ddev = dev_get_drvdata(dev);
703
704 drm_kms_helper_poll_disable(ddev);
705
706 return 0;
707}
708
709static int msm_pm_resume(struct device *dev)
710{
711 struct drm_device *ddev = dev_get_drvdata(dev);
712
713 drm_kms_helper_poll_enable(ddev);
714
715 return 0;
716}
717#endif
718
719static const struct dev_pm_ops msm_pm_ops = {
720 SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
721};
722
723/*
724 * Platform driver:
725 */
726
727static int msm_pdev_probe(struct platform_device *pdev)
728{
729 return drm_platform_init(&msm_driver, pdev);
730}
731
732static int msm_pdev_remove(struct platform_device *pdev)
733{
734 drm_platform_exit(&msm_driver, pdev);
735
736 return 0;
737}
738
739static const struct platform_device_id msm_id[] = {
740 { "mdp", 0 },
741 { }
742};
743
744static struct platform_driver msm_platform_driver = {
745 .probe = msm_pdev_probe,
746 .remove = msm_pdev_remove,
747 .driver = {
748 .owner = THIS_MODULE,
749 .name = "msm",
750 .pm = &msm_pm_ops,
751 },
752 .id_table = msm_id,
753};
754
755static int __init msm_drm_register(void)
756{
757 DBG("init");
758 hdmi_register();
759 a3xx_register();
760 return platform_driver_register(&msm_platform_driver);
761}
762
763static void __exit msm_drm_unregister(void)
764{
765 DBG("fini");
766 platform_driver_unregister(&msm_platform_driver);
767 hdmi_unregister();
768 a3xx_unregister();
769}
770
771module_init(msm_drm_register);
772module_exit(msm_drm_unregister);
773
 774MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
775MODULE_DESCRIPTION("MSM DRM Driver");
776MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 000000000000..34c36b2911d9
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRV_H__
19#define __MSM_DRV_H__
20
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/pm.h>
27#include <linux/pm_runtime.h>
28#include <linux/slab.h>
29#include <linux/list.h>
30#include <linux/iommu.h>
31#include <linux/types.h>
32#include <asm/sizes.h>
33
34#ifndef CONFIG_OF
35#include <mach/board.h>
36#include <mach/socinfo.h>
37#include <mach/iommu_domains.h>
38#endif
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc_helper.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/msm_drm.h>
44
45struct msm_kms;
46struct msm_gpu;
47
48#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
49
50struct msm_file_private {
51 /* currently we don't do anything useful with this.. but when
52 * per-context address spaces are supported we'd keep track of
53 * the context's page-tables here.
54 */
55 int dummy;
56};
57
58struct msm_drm_private {
59
60 struct msm_kms *kms;
61
62 /* when we have more than one 'msm_gpu' these need to be an array: */
63 struct msm_gpu *gpu;
64 struct msm_file_private *lastctx;
65
66 struct drm_fb_helper *fbdev;
67
68 uint32_t next_fence, completed_fence;
69 wait_queue_head_t fence_event;
70
71 /* list of GEM objects: */
72 struct list_head inactive_list;
73
74 struct workqueue_struct *wq;
75
76 /* registered IOMMU domains: */
77 unsigned int num_iommus;
78 struct iommu_domain *iommus[NUM_DOMAINS];
79
80 unsigned int num_crtcs;
81 struct drm_crtc *crtcs[8];
82
83 unsigned int num_encoders;
84 struct drm_encoder *encoders[8];
85
86 unsigned int num_connectors;
87 struct drm_connector *connectors[8];
88};
89
90struct msm_format {
91 uint32_t pixel_format;
92};
93
94/* As there are different display controller blocks depending on the
95 * snapdragon version, the kms support is split out and the appropriate
96 * implementation is loaded at runtime. The kms module is responsible
97 * for constructing the appropriate planes/crtcs/encoders/connectors.
98 */
99struct msm_kms_funcs {
100 /* hw initialization: */
101 int (*hw_init)(struct msm_kms *kms);
102 /* irq handling: */
103 void (*irq_preinstall)(struct msm_kms *kms);
104 int (*irq_postinstall)(struct msm_kms *kms);
105 void (*irq_uninstall)(struct msm_kms *kms);
106 irqreturn_t (*irq)(struct msm_kms *kms);
107 int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
108 void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
109 /* misc: */
110 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
111 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
112 struct drm_encoder *encoder);
113 /* cleanup: */
114 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
115 void (*destroy)(struct msm_kms *kms);
116};
117
118struct msm_kms {
119 const struct msm_kms_funcs *funcs;
120};
121
122struct msm_kms *mdp4_kms_init(struct drm_device *dev);
123
124int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
125int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
126 const char **names, int cnt);
127
128int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
129 struct timespec *timeout);
130void msm_update_fence(struct drm_device *dev, uint32_t fence);
131
132int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
133 struct drm_file *file);
134
135int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
136int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
137uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
138int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
139 uint32_t *iova);
140int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
141void msm_gem_put_iova(struct drm_gem_object *obj, int id);
142int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
143 struct drm_mode_create_dumb *args);
144int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
145 uint32_t handle);
146int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
147 uint32_t handle, uint64_t *offset);
148void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
149void *msm_gem_vaddr(struct drm_gem_object *obj);
150int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
151 struct work_struct *work);
152void msm_gem_move_to_active(struct drm_gem_object *obj,
153 struct msm_gpu *gpu, uint32_t fence);
154void msm_gem_move_to_inactive(struct drm_gem_object *obj);
155int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
156 struct timespec *timeout);
157int msm_gem_cpu_fini(struct drm_gem_object *obj);
158void msm_gem_free_object(struct drm_gem_object *obj);
159int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
160 uint32_t size, uint32_t flags, uint32_t *handle);
161struct drm_gem_object *msm_gem_new(struct drm_device *dev,
162 uint32_t size, uint32_t flags);
163
164struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
165const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
166struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
167 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
168struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
169 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
170
171struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
172
173struct drm_connector *hdmi_connector_init(struct drm_device *dev,
174 struct drm_encoder *encoder);
175void __init hdmi_register(void);
176void __exit hdmi_unregister(void);
177
178#ifdef CONFIG_DEBUG_FS
179void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
180void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
181void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
182#endif
183
184void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
185 const char *dbgname);
186void msm_writel(u32 data, void __iomem *addr);
187u32 msm_readl(const void __iomem *addr);
188
189#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
190#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
191
192static inline int align_pitch(int width, int bpp)
193{
194 int bytespp = (bpp + 7) / 8;
195 /* adreno needs pitch aligned to 32 pixels: */
196 return bytespp * ALIGN(width, 32);
197}
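/* (Editorial note: e.g. align_pitch(1366, 32) gives bytespp = 4 and
 * ALIGN(1366, 32) = 1376, i.e. a pitch of 5504 bytes.)
 */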
198
199/* for the generated headers: */
200#define INVALID_IDX(idx) ({BUG(); 0;})
201#define fui(x) ({BUG(); 0;})
202#define util_float_to_half(x) ({BUG(); 0;})
203
204
205#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
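/* (Editorial note: e.g. FIELD(version, MDP4_VERSION_MAJOR) expands to
 * (version & MDP4_VERSION_MAJOR__MASK) >> MDP4_VERSION_MAJOR__SHIFT,
 * using the __MASK/__SHIFT pairs from the generated headers.)
 */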
206
207/* for conditionally setting boolean flag(s): */
208#define COND(bool, val) ((bool) ? (val) : 0)
209
210
211#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 000000000000..0286c0eeb10c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_crtc_helper.h"
22
23struct msm_framebuffer {
24 struct drm_framebuffer base;
25 const struct msm_format *format;
26 struct drm_gem_object *planes[2];
27};
28#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
29
30
31static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
32 struct drm_file *file_priv,
33 unsigned int *handle)
34{
35 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
36 return drm_gem_handle_create(file_priv,
37 msm_fb->planes[0], handle);
38}
39
40static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
41{
42 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
43 int i, n = drm_format_num_planes(fb->pixel_format);
44
45 DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
46
47 drm_framebuffer_cleanup(fb);
48
49 for (i = 0; i < n; i++) {
50 struct drm_gem_object *bo = msm_fb->planes[i];
51 if (bo)
52 drm_gem_object_unreference_unlocked(bo);
53 }
54
55 kfree(msm_fb);
56}
57
58static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
59 struct drm_file *file_priv, unsigned flags, unsigned color,
60 struct drm_clip_rect *clips, unsigned num_clips)
61{
62 return 0;
63}
64
65static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
66 .create_handle = msm_framebuffer_create_handle,
67 .destroy = msm_framebuffer_destroy,
68 .dirty = msm_framebuffer_dirty,
69};
70
71#ifdef CONFIG_DEBUG_FS
72void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
73{
74 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
75 int i, n = drm_format_num_planes(fb->pixel_format);
76
77 seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
78 fb->width, fb->height, (char *)&fb->pixel_format,
79 fb->refcount.refcount.counter, fb->base.id);
80
81 for (i = 0; i < n; i++) {
82 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
83 i, fb->offsets[i], fb->pitches[i]);
84 msm_gem_describe(msm_fb->planes[i], m);
85 }
86}
87#endif
88
89struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
90{
91 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
92 return msm_fb->planes[plane];
93}
94
95const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 return msm_fb->format;
99}
100
101struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
102 struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
103{
104 struct drm_gem_object *bos[4] = {0};
105 struct drm_framebuffer *fb;
106 int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
107
108 for (i = 0; i < n; i++) {
109 bos[i] = drm_gem_object_lookup(dev, file,
110 mode_cmd->handles[i]);
111 if (!bos[i]) {
112 ret = -ENXIO;
113 goto out_unref;
114 }
115 }
116
117 fb = msm_framebuffer_init(dev, mode_cmd, bos);
118 if (IS_ERR(fb)) {
119 ret = PTR_ERR(fb);
120 goto out_unref;
121 }
122
123 return fb;
124
125out_unref:
126 for (i = 0; i < n; i++)
127 drm_gem_object_unreference_unlocked(bos[i]);
128 return ERR_PTR(ret);
129}
130
131struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
132 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
133{
134 struct msm_drm_private *priv = dev->dev_private;
135 struct msm_kms *kms = priv->kms;
136 struct msm_framebuffer *msm_fb;
137 struct drm_framebuffer *fb = NULL;
138 const struct msm_format *format;
139 int ret, i, n;
140 unsigned int hsub, vsub;
141
142 DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
143 dev, mode_cmd, mode_cmd->width, mode_cmd->height,
144 (char *)&mode_cmd->pixel_format);
145
146 n = drm_format_num_planes(mode_cmd->pixel_format);
147 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
148 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
149
150 format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
151 if (!format) {
152 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
153 (char *)&mode_cmd->pixel_format);
154 ret = -EINVAL;
155 goto fail;
156 }
157
158 msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
159 if (!msm_fb) {
160 ret = -ENOMEM;
161 goto fail;
162 }
163
164 fb = &msm_fb->base;
165
166 msm_fb->format = format;
167
168 for (i = 0; i < n; i++) {
169 unsigned int width = mode_cmd->width / (i ? hsub : 1);
170 unsigned int height = mode_cmd->height / (i ? vsub : 1);
171 unsigned int min_size;
172
173 min_size = (height - 1) * mode_cmd->pitches[i]
174 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
175 + mode_cmd->offsets[i];
176
177 if (bos[i]->size < min_size) {
178 ret = -EINVAL;
179 goto fail;
180 }
181
182 msm_fb->planes[i] = bos[i];
183 }
184
185 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
186
187 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
188 if (ret) {
189 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
190 goto fail;
191 }
192
193 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
194
195 return fb;
196
197fail:
198 if (fb)
199 msm_framebuffer_destroy(fb);
200
201 return ERR_PTR(ret);
202}
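The per-plane size check in msm_framebuffer_init() is plain arithmetic, so a standalone run-through may help. The sketch below assumes an NV12 buffer (two planes, 2x2 chroma subsampling, 1 and 2 bytes per sample respectively); the dimensions and layout are made up for the example:

#include <stdio.h>

int main(void)
{
	unsigned int fb_w = 640, fb_h = 480;
	unsigned int pitches[2] = { 640, 640 };		/* bytes per line */
	unsigned int offsets[2] = { 0, 640 * 480 };	/* CbCr follows Y */
	unsigned int cpp[2]     = { 1, 2 };		/* NV12: Y, then CbCr pairs */
	unsigned int hsub = 2, vsub = 2;
	int i;

	for (i = 0; i < 2; i++) {
		/* only the chroma plane(s) are subsampled: */
		unsigned int width  = fb_w / (i ? hsub : 1);
		unsigned int height = fb_h / (i ? vsub : 1);
		unsigned int min_size = (height - 1) * pitches[i]
				+ width * cpp[i]
				+ offsets[i];

		printf("plane %d: bo must be at least %u bytes\n", i, min_size);
	}
	return 0;
}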
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 000000000000..6c6d7d4c9b4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19
20#include "drm_crtc.h"
21#include "drm_fb_helper.h"
22
23/*
24 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
25 */
26
27#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
28
29struct msm_fbdev {
30 struct drm_fb_helper base;
31 struct drm_framebuffer *fb;
32 struct drm_gem_object *bo;
33};
34
35static struct fb_ops msm_fb_ops = {
36 .owner = THIS_MODULE,
37
38 /* Note: to properly handle manual update displays, we wrap the
39 * basic fbdev ops which write to the framebuffer
40 */
41 .fb_read = fb_sys_read,
42 .fb_write = fb_sys_write,
43 .fb_fillrect = sys_fillrect,
44 .fb_copyarea = sys_copyarea,
45 .fb_imageblit = sys_imageblit,
46
47 .fb_check_var = drm_fb_helper_check_var,
48 .fb_set_par = drm_fb_helper_set_par,
49 .fb_pan_display = drm_fb_helper_pan_display,
50 .fb_blank = drm_fb_helper_blank,
51 .fb_setcmap = drm_fb_helper_setcmap,
52};
53
54static int msm_fbdev_create(struct drm_fb_helper *helper,
55 struct drm_fb_helper_surface_size *sizes)
56{
57 struct msm_fbdev *fbdev = to_msm_fbdev(helper);
58 struct drm_device *dev = helper->dev;
59 struct drm_framebuffer *fb = NULL;
60 struct fb_info *fbi = NULL;
61 struct drm_mode_fb_cmd2 mode_cmd = {0};
62 dma_addr_t paddr;
63 int ret, size;
64
65 /* only doing ARGB32 since this is what is needed to alpha-blend
66 * with video overlays:
67 */
68 sizes->surface_bpp = 32;
69 sizes->surface_depth = 32;
70
71 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
72 sizes->surface_height, sizes->surface_bpp,
73 sizes->fb_width, sizes->fb_height);
74
75 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
76 sizes->surface_depth);
77
78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height;
80
81 mode_cmd.pitches[0] = align_pitch(
82 mode_cmd.width, sizes->surface_bpp);
83
84 /* allocate backing bo */
85 size = mode_cmd.pitches[0] * mode_cmd.height;
86 DBG("allocating %d bytes for fb %d", size, dev->primary->index);
87 mutex_lock(&dev->struct_mutex);
88 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
89 mutex_unlock(&dev->struct_mutex);
90 if (IS_ERR(fbdev->bo)) {
91 ret = PTR_ERR(fbdev->bo);
92 fbdev->bo = NULL;
93 dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
94 goto fail;
95 }
96
97 fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
98 if (IS_ERR(fb)) {
99 dev_err(dev->dev, "failed to allocate fb\n");
100 /* note: if fb creation failed, we can't rely on fb destroy
101 * to unref the bo:
102 */
103 drm_gem_object_unreference(fbdev->bo);
104 ret = PTR_ERR(fb);
105 goto fail;
106 }
107
108 mutex_lock(&dev->struct_mutex);
109
110 /* TODO implement our own fb_mmap so we don't need this: */
111 msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
112
113 fbi = framebuffer_alloc(0, dev->dev);
114 if (!fbi) {
115 dev_err(dev->dev, "failed to allocate fb info\n");
116 ret = -ENOMEM;
117 goto fail_unlock;
118 }
119
120 DBG("fbi=%p, dev=%p", fbi, dev);
121
122 fbdev->fb = fb;
123 helper->fb = fb;
124 helper->fbdev = fbi;
125
126 fbi->par = helper;
127 fbi->flags = FBINFO_DEFAULT;
128 fbi->fbops = &msm_fb_ops;
129
130 strcpy(fbi->fix.id, "msm");
131
132 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
133 if (ret) {
134 ret = -ENOMEM;
135 goto fail_unlock;
136 }
137
138 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
139 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
140
141 dev->mode_config.fb_base = paddr;
142
143 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
144 fbi->screen_size = fbdev->bo->size;
145 fbi->fix.smem_start = paddr;
146 fbi->fix.smem_len = fbdev->bo->size;
147
148 DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
149 DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
150
151 mutex_unlock(&dev->struct_mutex);
152
153 return 0;
154
155fail_unlock:
156 mutex_unlock(&dev->struct_mutex);
157fail:
158
159 if (ret) {
160 if (fbi)
161 framebuffer_release(fbi);
162 if (fb) {
163 drm_framebuffer_unregister_private(fb);
164 drm_framebuffer_remove(fb);
165 }
166 }
167
168 return ret;
169}
170
171static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
172 u16 red, u16 green, u16 blue, int regno)
173{
174 DBG("fbdev: set gamma");
175}
176
177static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
178 u16 *red, u16 *green, u16 *blue, int regno)
179{
180 DBG("fbdev: get gamma");
181}
182
183static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
184 .gamma_set = msm_crtc_fb_gamma_set,
185 .gamma_get = msm_crtc_fb_gamma_get,
186 .fb_probe = msm_fbdev_create,
187};
188
189/* initialize fbdev helper */
190struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
191{
192 struct msm_drm_private *priv = dev->dev_private;
193 struct msm_fbdev *fbdev = NULL;
194 struct drm_fb_helper *helper;
195 int ret = 0;
196
197 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
198 if (!fbdev)
199 goto fail;
200
201 helper = &fbdev->base;
202
203 helper->funcs = &msm_fb_helper_funcs;
204
205 ret = drm_fb_helper_init(dev, helper,
206 priv->num_crtcs, priv->num_connectors);
207 if (ret) {
208 dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
209 goto fail;
210 }
211
212 drm_fb_helper_single_add_all_connectors(helper);
213
214 /* disable all the possible outputs/crtcs before entering KMS mode */
215 drm_helper_disable_unused_functions(dev);
216
217 drm_fb_helper_initial_config(helper, 32);
218
219 priv->fbdev = helper;
220
221 return helper;
222
223fail:
224 kfree(fbdev);
225 return NULL;
226}
227
228void msm_fbdev_free(struct drm_device *dev)
229{
230 struct msm_drm_private *priv = dev->dev_private;
231 struct drm_fb_helper *helper = priv->fbdev;
232 struct msm_fbdev *fbdev;
233 struct fb_info *fbi;
234
235 DBG();
236
237 fbi = helper->fbdev;
238
239 /* only cleanup framebuffer if it is present */
240 if (fbi) {
241 unregister_framebuffer(fbi);
242 framebuffer_release(fbi);
243 }
244
245 drm_fb_helper_fini(helper);
246
247 fbdev = to_msm_fbdev(priv->fbdev);
248
249 /* this will free the backing object */
250 if (fbdev->fb) {
251 drm_framebuffer_unregister_private(fbdev->fb);
252 drm_framebuffer_remove(fbdev->fb);
253 }
254
255 kfree(fbdev);
256
257 priv->fbdev = NULL;
258}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000000..6b5a6c8c7658
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h>
20
21#include "msm_drv.h"
22#include "msm_gem.h"
23#include "msm_gpu.h"
24
25
26/* called with dev->struct_mutex held */
27static struct page **get_pages(struct drm_gem_object *obj)
28{
29 struct msm_gem_object *msm_obj = to_msm_bo(obj);
30
31 if (!msm_obj->pages) {
32 struct drm_device *dev = obj->dev;
33 struct page **p = drm_gem_get_pages(obj, 0);
34 int npages = obj->size >> PAGE_SHIFT;
35
36 if (IS_ERR(p)) {
37 dev_err(dev->dev, "could not get pages: %ld\n",
38 PTR_ERR(p));
39 return p;
40 }
41
42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
43 if (!msm_obj->sgt) {
44 dev_err(dev->dev, "failed to allocate sgt\n");
45 return ERR_PTR(-ENOMEM);
46 }
47
48 msm_obj->pages = p;
49
50 /* For non-cached buffers, ensure the new pages are clean
51 * because display controller, GPU, etc. are not coherent:
52 */
53 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
54 dma_map_sg(dev->dev, msm_obj->sgt->sgl,
55 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
56 }
57
58 return msm_obj->pages;
59}
60
61static void put_pages(struct drm_gem_object *obj)
62{
63 struct msm_gem_object *msm_obj = to_msm_bo(obj);
64
65 if (msm_obj->pages) {
66		/* For non-cached buffers, ensure the pages are clean
67 * because display controller, GPU, etc. are not coherent:
68 */
69 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
70 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
71 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
72 sg_free_table(msm_obj->sgt);
73 kfree(msm_obj->sgt);
74
75 drm_gem_put_pages(obj, msm_obj->pages, true, false);
76 msm_obj->pages = NULL;
77 }
78}
79
80int msm_gem_mmap_obj(struct drm_gem_object *obj,
81 struct vm_area_struct *vma)
82{
83 struct msm_gem_object *msm_obj = to_msm_bo(obj);
84
85 vma->vm_flags &= ~VM_PFNMAP;
86 vma->vm_flags |= VM_MIXEDMAP;
87
88 if (msm_obj->flags & MSM_BO_WC) {
89 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
90 } else if (msm_obj->flags & MSM_BO_UNCACHED) {
91 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
92 } else {
93 /*
94 * Shunt off cached objs to shmem file so they have their own
95 * address_space (so unmap_mapping_range does what we want,
96 * in particular in the case of mmap'd dmabufs)
97 */
98 fput(vma->vm_file);
99 get_file(obj->filp);
100 vma->vm_pgoff = 0;
101 vma->vm_file = obj->filp;
102
103 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
104 }
105
106 return 0;
107}
108
109int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
110{
111 int ret;
112
113 ret = drm_gem_mmap(filp, vma);
114 if (ret) {
115 DBG("mmap failed: %d", ret);
116 return ret;
117 }
118
119 return msm_gem_mmap_obj(vma->vm_private_data, vma);
120}
121
122int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
123{
124 struct drm_gem_object *obj = vma->vm_private_data;
125 struct msm_gem_object *msm_obj = to_msm_bo(obj);
126 struct drm_device *dev = obj->dev;
127 struct page **pages;
128 unsigned long pfn;
129 pgoff_t pgoff;
130 int ret;
131
132 /* Make sure we don't parallel update on a fault, nor move or remove
133 * something from beneath our feet
134 */
135 ret = mutex_lock_interruptible(&dev->struct_mutex);
136 if (ret)
137 goto out;
138
139 /* make sure we have pages attached now */
140 pages = get_pages(obj);
141 if (IS_ERR(pages)) {
142 ret = PTR_ERR(pages);
143 goto out_unlock;
144 }
145
146 /* We don't use vmf->pgoff since that has the fake offset: */
147 pgoff = ((unsigned long)vmf->virtual_address -
148 vma->vm_start) >> PAGE_SHIFT;
149
150 pfn = page_to_pfn(msm_obj->pages[pgoff]);
151
152 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
153 pfn, pfn << PAGE_SHIFT);
154
155 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
156
157out_unlock:
158 mutex_unlock(&dev->struct_mutex);
159out:
160 switch (ret) {
161 case -EAGAIN:
162 set_need_resched();
163 case 0:
164 case -ERESTARTSYS:
165 case -EINTR:
166 return VM_FAULT_NOPAGE;
167 case -ENOMEM:
168 return VM_FAULT_OOM;
169 default:
170 return VM_FAULT_SIGBUS;
171 }
172}
173
174/** get mmap offset */
175static uint64_t mmap_offset(struct drm_gem_object *obj)
176{
177 struct drm_device *dev = obj->dev;
178 int ret;
179
180 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
181
182 /* Make it mmapable */
183 ret = drm_gem_create_mmap_offset(obj);
184
185 if (ret) {
186 dev_err(dev->dev, "could not allocate mmap offset\n");
187 return 0;
188 }
189
190 return drm_vma_node_offset_addr(&obj->vma_node);
191}
192
193uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
194{
195 uint64_t offset;
196 mutex_lock(&obj->dev->struct_mutex);
197 offset = mmap_offset(obj);
198 mutex_unlock(&obj->dev->struct_mutex);
199 return offset;
200}
201
202/* helpers for dealing w/ iommu: */
203static int map_range(struct iommu_domain *domain, unsigned int iova,
204 struct sg_table *sgt, unsigned int len, int prot)
205{
206 struct scatterlist *sg;
207 unsigned int da = iova;
208 unsigned int i, j;
209 int ret;
210
211 if (!domain || !sgt)
212 return -EINVAL;
213
214 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
215 u32 pa = sg_phys(sg) - sg->offset;
216 size_t bytes = sg->length + sg->offset;
217
218 VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
219
220 ret = iommu_map(domain, da, pa, bytes, prot);
221 if (ret)
222 goto fail;
223
224 da += bytes;
225 }
226
227 return 0;
228
229fail:
230 da = iova;
231
232 for_each_sg(sgt->sgl, sg, i, j) {
233 size_t bytes = sg->length + sg->offset;
234 iommu_unmap(domain, da, bytes);
235 da += bytes;
236 }
237 return ret;
238}
239
240static void unmap_range(struct iommu_domain *domain, unsigned int iova,
241 struct sg_table *sgt, unsigned int len)
242{
243 struct scatterlist *sg;
244 unsigned int da = iova;
245 int i;
246
247 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
248 size_t bytes = sg->length + sg->offset;
249 size_t unmapped;
250
251 unmapped = iommu_unmap(domain, da, bytes);
252 if (unmapped < bytes)
253 break;
254
255 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
256
257 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
258
259 da += bytes;
260 }
261}
262
263/* should be called under struct_mutex.. although it can be called
264 * from atomic context without struct_mutex to acquire an extra
265 * iova ref if you know one is already held.
266 *
267 * That means when I do eventually need to add support for unpinning,
268 * the refcnt counter needs to be atomic_t.
269 */
270int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
271 uint32_t *iova)
272{
273 struct msm_gem_object *msm_obj = to_msm_bo(obj);
274 int ret = 0;
275
276 if (!msm_obj->domain[id].iova) {
277 struct msm_drm_private *priv = obj->dev->dev_private;
278 uint32_t offset = (uint32_t)mmap_offset(obj);
279 struct page **pages;
280 pages = get_pages(obj);
281 if (IS_ERR(pages))
282 return PTR_ERR(pages);
283 // XXX ideally we would not map buffers writable when not needed...
284 ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
285 obj->size, IOMMU_READ | IOMMU_WRITE);
286 msm_obj->domain[id].iova = offset;
287 }
288
289 if (!ret)
290 *iova = msm_obj->domain[id].iova;
291
292 return ret;
293}
294
295int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
296{
297 int ret;
298 mutex_lock(&obj->dev->struct_mutex);
299 ret = msm_gem_get_iova_locked(obj, id, iova);
300 mutex_unlock(&obj->dev->struct_mutex);
301 return ret;
302}
303
304void msm_gem_put_iova(struct drm_gem_object *obj, int id)
305{
306 // XXX TODO ..
307 // NOTE: probably don't need a _locked() version.. we wouldn't
308 // normally unmap here, but instead just mark that it could be
309 // unmapped (if the iova refcnt drops to zero), but then later
310 // if another _get_iova_locked() fails we can start unmapping
311 // things that are no longer needed..
312}
313
314int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
315 struct drm_mode_create_dumb *args)
316{
317 args->pitch = align_pitch(args->width, args->bpp);
318 args->size = PAGE_ALIGN(args->pitch * args->height);
319 return msm_gem_new_handle(dev, file, args->size,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321}
322
323int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
324 uint32_t handle)
325{
326 /* No special work needed, drop the reference and see what falls out */
327 return drm_gem_handle_delete(file, handle);
328}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset)
332{
333 struct drm_gem_object *obj;
334 int ret = 0;
335
336 /* GEM does all our handle to object mapping */
337 obj = drm_gem_object_lookup(dev, file, handle);
338 if (obj == NULL) {
339 ret = -ENOENT;
340 goto fail;
341 }
342
343 *offset = msm_gem_mmap_offset(obj);
344
345 drm_gem_object_unreference_unlocked(obj);
346
347fail:
348 return ret;
349}
350
351void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
352{
353 struct msm_gem_object *msm_obj = to_msm_bo(obj);
354 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
355 if (!msm_obj->vaddr) {
356 struct page **pages = get_pages(obj);
357 if (IS_ERR(pages))
358 return ERR_CAST(pages);
359 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
360 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
361 }
362 return msm_obj->vaddr;
363}
364
365void *msm_gem_vaddr(struct drm_gem_object *obj)
366{
367 void *ret;
368 mutex_lock(&obj->dev->struct_mutex);
369 ret = msm_gem_vaddr_locked(obj);
370 mutex_unlock(&obj->dev->struct_mutex);
371 return ret;
372}
373
374int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
375 struct work_struct *work)
376{
377 struct drm_device *dev = obj->dev;
378 struct msm_drm_private *priv = dev->dev_private;
379 struct msm_gem_object *msm_obj = to_msm_bo(obj);
380 int ret = 0;
381
382 mutex_lock(&dev->struct_mutex);
383 if (!list_empty(&work->entry)) {
384 ret = -EINVAL;
385 } else if (is_active(msm_obj)) {
386 list_add_tail(&work->entry, &msm_obj->inactive_work);
387 } else {
388 queue_work(priv->wq, work);
389 }
390 mutex_unlock(&dev->struct_mutex);
391
392 return ret;
393}
394
395void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence)
397{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu;
400 msm_obj->fence = fence;
401 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403}
404
405void msm_gem_move_to_inactive(struct drm_gem_object *obj)
406{
407 struct drm_device *dev = obj->dev;
408 struct msm_drm_private *priv = dev->dev_private;
409 struct msm_gem_object *msm_obj = to_msm_bo(obj);
410
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412
413 msm_obj->gpu = NULL;
414 msm_obj->fence = 0;
415 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417
418 while (!list_empty(&msm_obj->inactive_work)) {
419 struct work_struct *work;
420
421 work = list_first_entry(&msm_obj->inactive_work,
422 struct work_struct, entry);
423
424 list_del_init(&work->entry);
425 queue_work(priv->wq, work);
426 }
427}
428
429int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
430 struct timespec *timeout)
431{
432 struct drm_device *dev = obj->dev;
433 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0;
435
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
438
439 /* TODO cache maintenance */
440
441 return ret;
442}
443
444int msm_gem_cpu_fini(struct drm_gem_object *obj)
445{
446 /* TODO cache maintenance */
447 return 0;
448}
449
450#ifdef CONFIG_DEBUG_FS
451void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
452{
453 struct drm_device *dev = obj->dev;
454 struct msm_gem_object *msm_obj = to_msm_bo(obj);
455 uint64_t off = drm_vma_node_start(&obj->vma_node);
456
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size);
462}
463
464void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
465{
466 struct msm_gem_object *msm_obj;
467 int count = 0;
468 size_t size = 0;
469
470 list_for_each_entry(msm_obj, list, mm_list) {
471 struct drm_gem_object *obj = &msm_obj->base;
472 seq_printf(m, " ");
473 msm_gem_describe(obj, m);
474 count++;
475 size += obj->size;
476 }
477
478 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
479}
480#endif
481
482void msm_gem_free_object(struct drm_gem_object *obj)
483{
484 struct drm_device *dev = obj->dev;
485 struct msm_gem_object *msm_obj = to_msm_bo(obj);
486 int id;
487
488 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
489
490 /* object should not be on active list: */
491 WARN_ON(is_active(msm_obj));
492
493 list_del(&msm_obj->mm_list);
494
495 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
496 if (msm_obj->domain[id].iova) {
497 struct msm_drm_private *priv = obj->dev->dev_private;
498 uint32_t offset = (uint32_t)mmap_offset(obj);
499 unmap_range(priv->iommus[id], offset,
500 msm_obj->sgt, obj->size);
501 }
502 }
503
504 drm_gem_free_mmap_offset(obj);
505
506 if (msm_obj->vaddr)
507 vunmap(msm_obj->vaddr);
508
509 put_pages(obj);
510
511 if (msm_obj->resv == &msm_obj->_resv)
512 reservation_object_fini(msm_obj->resv);
513
514 drm_gem_object_release(obj);
515
516 kfree(msm_obj);
517}
518
519/* convenience method to construct a GEM buffer object, and userspace handle */
520int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
521 uint32_t size, uint32_t flags, uint32_t *handle)
522{
523 struct drm_gem_object *obj;
524 int ret;
525
526 ret = mutex_lock_interruptible(&dev->struct_mutex);
527 if (ret)
528 return ret;
529
530 obj = msm_gem_new(dev, size, flags);
531
532 mutex_unlock(&dev->struct_mutex);
533
534 if (IS_ERR(obj))
535 return PTR_ERR(obj);
536
537 ret = drm_gem_handle_create(file, obj, handle);
538
539 /* drop reference from allocate - handle holds it now */
540 drm_gem_object_unreference_unlocked(obj);
541
542 return ret;
543}
544
545struct drm_gem_object *msm_gem_new(struct drm_device *dev,
546 uint32_t size, uint32_t flags)
547{
548 struct msm_drm_private *priv = dev->dev_private;
549 struct msm_gem_object *msm_obj;
550 struct drm_gem_object *obj = NULL;
551 int ret;
552
553 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
554
555 size = PAGE_ALIGN(size);
556
557 switch (flags & MSM_BO_CACHE_MASK) {
558 case MSM_BO_UNCACHED:
559 case MSM_BO_CACHED:
560 case MSM_BO_WC:
561 break;
562 default:
563 dev_err(dev->dev, "invalid cache flag: %x\n",
564 (flags & MSM_BO_CACHE_MASK));
565 ret = -EINVAL;
566 goto fail;
567 }
568
569 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
570 if (!msm_obj) {
571 ret = -ENOMEM;
572 goto fail;
573 }
574
575 obj = &msm_obj->base;
576
577 ret = drm_gem_object_init(dev, obj, size);
578 if (ret)
579 goto fail;
580
581 msm_obj->flags = flags;
582
583 msm_obj->resv = &msm_obj->_resv;
584 reservation_object_init(msm_obj->resv);
585
586 INIT_LIST_HEAD(&msm_obj->submit_entry);
587 INIT_LIST_HEAD(&msm_obj->inactive_work);
588 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
589
590 return obj;
591
592fail:
593 if (obj)
594 drm_gem_object_unreference_unlocked(obj);
595
596 return ERR_PTR(ret);
597}
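A hypothetical in-kernel caller of the allocation API above would follow the same pattern msm_fbdev.c and msm_ringbuffer.c use: allocate under struct_mutex, pin into an IOMMU context, then map for CPU access. example_alloc_scratch() below is an invented name and error handling is abbreviated; it is only a sketch of the calling convention:

#include "msm_drv.h"
#include "msm_gem.h"

static int example_alloc_scratch(struct drm_device *dev, int id,
		struct drm_gem_object **bo, uint32_t *iova, void **vaddr)
{
	int ret;

	mutex_lock(&dev->struct_mutex);

	/* allocation must happen with struct_mutex held: */
	*bo = msm_gem_new(dev, 0x10000, MSM_BO_WC);	/* 64 KiB, write-combined */
	if (IS_ERR(*bo)) {
		ret = PTR_ERR(*bo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* pin into the iommu domain registered under 'id': */
	ret = msm_gem_get_iova_locked(*bo, id, iova);

	/* and map it for CPU writes: */
	if (!ret)
		*vaddr = msm_gem_vaddr_locked(*bo);

	mutex_unlock(&dev->struct_mutex);
	return ret;
}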
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 000000000000..d746f13d283c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GEM_H__
19#define __MSM_GEM_H__
20
21#include <linux/reservation.h>
22#include "msm_drv.h"
23
24struct msm_gem_object {
25 struct drm_gem_object base;
26
27 uint32_t flags;
28
29	/* An object is either:
30 * inactive - on priv->inactive_list
31	 * active   - on one of the gpu's active_list..  well, at
32 * least for now we don't have (I don't think) hw sync between
33	 * 2d and 3d on devices which have both, meaning we need to
34	 * block on submit if a bo is already on the other ring
35 *
36 */
37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence;
40
41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for
43 * the duration of the ioctl, so one bo can never be on multiple
44 * submit lists.
45 */
46 struct list_head submit_entry;
47
48	/* work deferred until bo is inactive: */
49 struct list_head inactive_work;
50
51 struct page **pages;
52 struct sg_table *sgt;
53 void *vaddr;
54
55 struct {
56 // XXX
57 uint32_t iova;
58 } domain[NUM_DOMAINS];
59
60 /* normally (resv == &_resv) except for imported bo's */
61 struct reservation_object *resv;
62 struct reservation_object _resv;
63};
64#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
65
66static inline bool is_active(struct msm_gem_object *msm_obj)
67{
68 return msm_obj->gpu != NULL;
69}
70
71#define MAX_CMDS 4
72
73/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
74 * associated with the cmdstream submission for synchronization (and
75 * make it easier to unwind when things go wrong, etc). This only
76 * lasts for the duration of the submit-ioctl.
77 */
78struct msm_gem_submit {
79 struct drm_device *dev;
80 struct msm_gpu *gpu;
81 struct list_head bo_list;
82 struct ww_acquire_ctx ticket;
83 uint32_t fence;
84 bool valid;
85 unsigned int nr_cmds;
86 unsigned int nr_bos;
87 struct {
88 uint32_t type;
89 uint32_t size; /* in dwords */
90 uint32_t iova;
91 } cmd[MAX_CMDS];
92 struct {
93 uint32_t flags;
94 struct msm_gem_object *obj;
95 uint32_t iova;
96 } bos[0];
97};
98
99#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 000000000000..3e1ef3a00f60
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gpu.h"
20#include "msm_gem.h"
21
22/*
23 * Cmdstream submission:
24 */
25
26#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
27/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28#define BO_VALID 0x8000
29#define BO_LOCKED 0x4000
30#define BO_PINNED 0x2000
31
32static inline void __user *to_user_ptr(u64 address)
33{
34 return (void __user *)(uintptr_t)address;
35}
36
37static struct msm_gem_submit *submit_create(struct drm_device *dev,
38 struct msm_gpu *gpu, int nr)
39{
40 struct msm_gem_submit *submit;
41 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
42
43 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
44 if (submit) {
45 submit->dev = dev;
46 submit->gpu = gpu;
47
48		/* initially, until copy_from_user() and bo lookup succeed: */
49 submit->nr_bos = 0;
50 submit->nr_cmds = 0;
51
52 INIT_LIST_HEAD(&submit->bo_list);
53 ww_acquire_init(&submit->ticket, &reservation_ww_class);
54 }
55
56 return submit;
57}
58
59static int submit_lookup_objects(struct msm_gem_submit *submit,
60 struct drm_msm_gem_submit *args, struct drm_file *file)
61{
62 unsigned i;
63 int ret = 0;
64
65 spin_lock(&file->table_lock);
66
67 for (i = 0; i < args->nr_bos; i++) {
68 struct drm_msm_gem_submit_bo submit_bo;
69 struct drm_gem_object *obj;
70 struct msm_gem_object *msm_obj;
71 void __user *userptr =
72 to_user_ptr(args->bos + (i * sizeof(submit_bo)));
73
74 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
75 if (ret) {
76 ret = -EFAULT;
77 goto out_unlock;
78 }
79
80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags);
82 ret = -EINVAL;
83 goto out_unlock;
84 }
85
86 submit->bos[i].flags = submit_bo.flags;
87 /* in validate_objects() we figure out if this is true: */
88 submit->bos[i].iova = submit_bo.presumed;
89
90 /* normally use drm_gem_object_lookup(), but for bulk lookup
91 * all under single table_lock just hit object_idr directly:
92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i);
96 ret = -EINVAL;
97 goto out_unlock;
98 }
99
100 msm_obj = to_msm_bo(obj);
101
102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list",
104 submit_bo.handle, i);
105 ret = -EINVAL;
106 goto out_unlock;
107 }
108
109 drm_gem_object_reference(obj);
110
111 submit->bos[i].obj = msm_obj;
112
113 list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
114 }
115
116out_unlock:
117 submit->nr_bos = i;
118 spin_unlock(&file->table_lock);
119
120 return ret;
121}
122
123static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
124{
125 struct msm_gem_object *msm_obj = submit->bos[i].obj;
126
127 if (submit->bos[i].flags & BO_PINNED)
128 msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
129
130 if (submit->bos[i].flags & BO_LOCKED)
131 ww_mutex_unlock(&msm_obj->resv->lock);
132
133 if (!(submit->bos[i].flags & BO_VALID))
134 submit->bos[i].iova = 0;
135
136 submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
137}
138
139/* This is where we make sure all the bo's are reserved and pin'd: */
140static int submit_validate_objects(struct msm_gem_submit *submit)
141{
142 int contended, slow_locked = -1, i, ret = 0;
143
144retry:
145 submit->valid = true;
146
147 for (i = 0; i < submit->nr_bos; i++) {
148 struct msm_gem_object *msm_obj = submit->bos[i].obj;
149 uint32_t iova;
150
151 if (slow_locked == i)
152 slow_locked = -1;
153
154 contended = i;
155
156 if (!(submit->bos[i].flags & BO_LOCKED)) {
157 ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
158 &submit->ticket);
159 if (ret)
160 goto fail;
161 submit->bos[i].flags |= BO_LOCKED;
162 }
163
164
165 /* if locking succeeded, pin bo: */
166 ret = msm_gem_get_iova(&msm_obj->base,
167 submit->gpu->id, &iova);
168
169 /* this would break the logic in the fail path.. there is no
170 * reason for this to happen, but just to be on the safe side
171 * let's notice if this starts happening in the future:
172 */
173 WARN_ON(ret == -EDEADLK);
174
175 if (ret)
176 goto fail;
177
178 submit->bos[i].flags |= BO_PINNED;
179
180 if (iova == submit->bos[i].iova) {
181 submit->bos[i].flags |= BO_VALID;
182 } else {
183 submit->bos[i].iova = iova;
184 submit->bos[i].flags &= ~BO_VALID;
185 submit->valid = false;
186 }
187 }
188
189 ww_acquire_done(&submit->ticket);
190
191 return 0;
192
193fail:
194 for (; i >= 0; i--)
195 submit_unlock_unpin_bo(submit, i);
196
197 if (slow_locked > 0)
198 submit_unlock_unpin_bo(submit, slow_locked);
199
200 if (ret == -EDEADLK) {
201 struct msm_gem_object *msm_obj = submit->bos[contended].obj;
202 /* we lost out in a seqno race, lock and retry.. */
203 ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
204 &submit->ticket);
205 if (!ret) {
206 submit->bos[contended].flags |= BO_LOCKED;
207 slow_locked = contended;
208 goto retry;
209 }
210 }
211
212 return ret;
213}
214
215static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{
218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
220		return -EINVAL;
221 }
222
223 if (obj)
224 *obj = submit->bos[idx].obj;
225 if (iova)
226 *iova = submit->bos[idx].iova;
227 if (valid)
228 *valid = !!(submit->bos[idx].flags & BO_VALID);
229
230 return 0;
231}
232
233/* process the reloc's and patch up the cmdstream as needed: */
234static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
235 uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
236{
237 uint32_t i, last_offset = 0;
238 uint32_t *ptr;
239 int ret;
240
241 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset);
243 return -EINVAL;
244 }
245
246 /* For now, just map the entire thing. Eventually we probably
247	 * want to do it page-by-page, w/ kmap() if not vmap()d..
248 */
249 ptr = msm_gem_vaddr(&obj->base);
250
251 if (IS_ERR(ptr)) {
252 ret = PTR_ERR(ptr);
253 DBG("failed to map: %d", ret);
254 return ret;
255 }
256
257 for (i = 0; i < nr_relocs; i++) {
258 struct drm_msm_gem_submit_reloc submit_reloc;
259 void __user *userptr =
260 to_user_ptr(relocs + (i * sizeof(submit_reloc)));
261 uint32_t iova, off;
262 bool valid;
263
264 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
265 if (ret)
266 return -EFAULT;
267
268 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u",
270 submit_reloc.submit_offset);
271 return -EINVAL;
272 }
273
274 /* offset in dwords: */
275 off = submit_reloc.submit_offset / 4;
276
277 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i);
280 return -EINVAL;
281 }
282
283 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
284 if (ret)
285 return ret;
286
287 if (valid)
288 continue;
289
290 iova += submit_reloc.reloc_offset;
291
292 if (submit_reloc.shift < 0)
293 iova >>= -submit_reloc.shift;
294 else
295 iova <<= submit_reloc.shift;
296
297 ptr[off] = iova | submit_reloc.or;
298
299 last_offset = off;
300 }
301
302 return 0;
303}
304
305static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
306{
307 unsigned i;
308
309 mutex_lock(&submit->dev->struct_mutex);
310 for (i = 0; i < submit->nr_bos; i++) {
311 struct msm_gem_object *msm_obj = submit->bos[i].obj;
312 submit_unlock_unpin_bo(submit, i);
313 list_del_init(&msm_obj->submit_entry);
314 drm_gem_object_unreference(&msm_obj->base);
315 }
316 mutex_unlock(&submit->dev->struct_mutex);
317
318 ww_acquire_fini(&submit->ticket);
319 kfree(submit);
320}
321
322int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
323 struct drm_file *file)
324{
325 struct msm_drm_private *priv = dev->dev_private;
326 struct drm_msm_gem_submit *args = data;
327 struct msm_file_private *ctx = file->driver_priv;
328 struct msm_gem_submit *submit;
329 struct msm_gpu *gpu;
330 unsigned i;
331 int ret;
332
333 /* for now, we just have 3d pipe.. eventually this would need to
334 * be more clever to dispatch to appropriate gpu module:
335 */
336 if (args->pipe != MSM_PIPE_3D0)
337 return -EINVAL;
338
339 gpu = priv->gpu;
340
341 if (args->nr_cmds > MAX_CMDS)
342 return -EINVAL;
343
344 submit = submit_create(dev, gpu, args->nr_bos);
345 if (!submit) {
346 ret = -ENOMEM;
347 goto out;
348 }
349
350 ret = submit_lookup_objects(submit, args, file);
351 if (ret)
352 goto out;
353
354 ret = submit_validate_objects(submit);
355 if (ret)
356 goto out;
357
358 for (i = 0; i < args->nr_cmds; i++) {
359 struct drm_msm_gem_submit_cmd submit_cmd;
360 void __user *userptr =
361 to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
362 struct msm_gem_object *msm_obj;
363 uint32_t iova;
364
365 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
366 if (ret) {
367 ret = -EFAULT;
368 goto out;
369 }
370
371 ret = submit_bo(submit, submit_cmd.submit_idx,
372 &msm_obj, &iova, NULL);
373 if (ret)
374 goto out;
375
376 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u",
378 submit_cmd.size);
379 ret = -EINVAL;
380 goto out;
381 }
382
383 if (submit_cmd.size >= msm_obj->base.size) {
384 DBG("invalid cmdstream size: %u", submit_cmd.size);
385 ret = -EINVAL;
386 goto out;
387 }
388
389 submit->cmd[i].type = submit_cmd.type;
390 submit->cmd[i].size = submit_cmd.size / 4;
391 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
392
393 if (submit->valid)
394 continue;
395
396 ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
397 submit_cmd.nr_relocs, submit_cmd.relocs);
398 if (ret)
399 goto out;
400 }
401
402 submit->nr_cmds = i;
403
404 ret = msm_gpu_submit(gpu, submit, ctx);
405
406 args->fence = submit->fence;
407
408out:
409 if (submit)
410 submit_cleanup(submit, !!ret);
411 return ret;
412}
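The reloc patching in submit_reloc() boils down to a few lines of arithmetic per entry: take the iova the bo was pinned at, add the reloc offset, apply the shift, OR in the constant, and store the result at submit_offset/4. The standalone sketch below walks one invented reloc entry through those steps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmdstream[8] = { 0 };
	uint32_t bo_iova = 0x10020000;	/* where validate pinned the target bo */

	/* one hypothetical reloc entry: */
	uint32_t reloc_offset  = 0x100;	/* offset inside the target bo */
	int32_t  shift         = 0;	/* >0 shifts left, <0 shifts right */
	uint32_t or_mask       = 0x3;	/* low bits OR'd into the address */
	uint32_t submit_offset = 12;	/* byte offset into the cmdstream bo */

	uint32_t iova = bo_iova + reloc_offset;

	if (shift < 0)
		iova >>= -shift;
	else
		iova <<= shift;

	cmdstream[submit_offset / 4] = iova | or_mask;

	printf("dword %u patched to 0x%08x\n",
			submit_offset / 4, cmdstream[submit_offset / 4]);
	return 0;
}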
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 000000000000..e1e1ec9321ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_gpu.h"
19#include "msm_gem.h"
20
21
22/*
23 * Power Management:
24 */
25
26#ifdef CONFIG_MSM_BUS_SCALING
27#include <mach/board.h>
28#include <mach/kgsl.h>
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{
31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
33
34 if (!pdev) {
35 dev_err(dev->dev, "could not find dtv pdata\n");
36 return;
37 }
38
39 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc);
42 }
43}
44
45static void bs_fini(struct msm_gpu *gpu)
46{
47 if (gpu->bsc) {
48 msm_bus_scale_unregister_client(gpu->bsc);
49 gpu->bsc = 0;
50 }
51}
52
53static void bs_set(struct msm_gpu *gpu, int idx)
54{
55 if (gpu->bsc) {
56 DBG("set bus scaling: %d", idx);
57 msm_bus_scale_client_update_request(gpu->bsc, idx);
58 }
59}
60#else
61static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
62static void bs_fini(struct msm_gpu *gpu) {}
63static void bs_set(struct msm_gpu *gpu, int idx) {}
64#endif
65
66static int enable_pwrrail(struct msm_gpu *gpu)
67{
68 struct drm_device *dev = gpu->dev;
69 int ret = 0;
70
71 if (gpu->gpu_reg) {
72 ret = regulator_enable(gpu->gpu_reg);
73 if (ret) {
74 dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
75 return ret;
76 }
77 }
78
79 if (gpu->gpu_cx) {
80 ret = regulator_enable(gpu->gpu_cx);
81 if (ret) {
82 dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
83 return ret;
84 }
85 }
86
87 return 0;
88}
89
90static int disable_pwrrail(struct msm_gpu *gpu)
91{
92 if (gpu->gpu_cx)
93 regulator_disable(gpu->gpu_cx);
94 if (gpu->gpu_reg)
95 regulator_disable(gpu->gpu_reg);
96 return 0;
97}
98
99static int enable_clk(struct msm_gpu *gpu)
100{
101 struct clk *rate_clk = NULL;
102 int i;
103
104 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
105 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
106 if (gpu->grp_clks[i]) {
107 clk_prepare(gpu->grp_clks[i]);
108 rate_clk = gpu->grp_clks[i];
109 }
110 }
111
112 if (rate_clk && gpu->fast_rate)
113 clk_set_rate(rate_clk, gpu->fast_rate);
114
115 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
116 if (gpu->grp_clks[i])
117 clk_enable(gpu->grp_clks[i]);
118
119 return 0;
120}
121
122static int disable_clk(struct msm_gpu *gpu)
123{
124 struct clk *rate_clk = NULL;
125 int i;
126
127 /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
128 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
129 if (gpu->grp_clks[i]) {
130 clk_disable(gpu->grp_clks[i]);
131 rate_clk = gpu->grp_clks[i];
132 }
133 }
134
135 if (rate_clk && gpu->slow_rate)
136 clk_set_rate(rate_clk, gpu->slow_rate);
137
138 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
139 if (gpu->grp_clks[i])
140 clk_unprepare(gpu->grp_clks[i]);
141
142 return 0;
143}
144
145static int enable_axi(struct msm_gpu *gpu)
146{
147 if (gpu->ebi1_clk)
148 clk_prepare_enable(gpu->ebi1_clk);
149 if (gpu->bus_freq)
150 bs_set(gpu, gpu->bus_freq);
151 return 0;
152}
153
154static int disable_axi(struct msm_gpu *gpu)
155{
156 if (gpu->ebi1_clk)
157 clk_disable_unprepare(gpu->ebi1_clk);
158 if (gpu->bus_freq)
159 bs_set(gpu, 0);
160 return 0;
161}
162
163int msm_gpu_pm_resume(struct msm_gpu *gpu)
164{
165 int ret;
166
167 DBG("%s", gpu->name);
168
169 ret = enable_pwrrail(gpu);
170 if (ret)
171 return ret;
172
173 ret = enable_clk(gpu);
174 if (ret)
175 return ret;
176
177 ret = enable_axi(gpu);
178 if (ret)
179 return ret;
180
181 return 0;
182}
183
184int msm_gpu_pm_suspend(struct msm_gpu *gpu)
185{
186 int ret;
187
188 DBG("%s", gpu->name);
189
190 ret = disable_axi(gpu);
191 if (ret)
192 return ret;
193
194 ret = disable_clk(gpu);
195 if (ret)
196 return ret;
197
198 ret = disable_pwrrail(gpu);
199 if (ret)
200 return ret;
201
202 return 0;
203}
204
205/*
206 * Hangcheck detection for locked gpu:
207 */
208
209static void recover_worker(struct work_struct *work)
210{
211 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
212 struct drm_device *dev = gpu->dev;
213
214 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
215
216 mutex_lock(&dev->struct_mutex);
217 gpu->funcs->recover(gpu);
218 mutex_unlock(&dev->struct_mutex);
219
220 msm_gpu_retire(gpu);
221}
222
223static void hangcheck_timer_reset(struct msm_gpu *gpu)
224{
225 DBG("%s", gpu->name);
226 mod_timer(&gpu->hangcheck_timer,
227 round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
228}
229
230static void hangcheck_handler(unsigned long data)
231{
232 struct msm_gpu *gpu = (struct msm_gpu *)data;
233 uint32_t fence = gpu->funcs->last_fence(gpu);
234
235 if (fence != gpu->hangcheck_fence) {
236 /* some progress has been made.. ya! */
237 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence;
242 queue_work(priv->wq, &gpu->recover_work);
243 }
244
245 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu);
248}
249
250/*
251 * Cmdstream submission/retirement:
252 */
253
254static void retire_worker(struct work_struct *work)
255{
256 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
257 struct drm_device *dev = gpu->dev;
258 uint32_t fence = gpu->funcs->last_fence(gpu);
259
260 mutex_lock(&dev->struct_mutex);
261
262 while (!list_empty(&gpu->active_list)) {
263 struct msm_gem_object *obj;
264
265 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list);
267
268 if (obj->fence <= fence) {
269 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id);
272 drm_gem_object_unreference(&obj->base);
273 } else {
274 break;
275 }
276 }
277
278 msm_update_fence(gpu->dev, fence);
279
280 mutex_unlock(&dev->struct_mutex);
281}
282
283/* call from irq handler to schedule work to retire bo's */
284void msm_gpu_retire(struct msm_gpu *gpu)
285{
286 struct msm_drm_private *priv = gpu->dev->dev_private;
287 queue_work(priv->wq, &gpu->retire_work);
288}
289
290/* add bo's to gpu's ring, and kick gpu: */
291int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
292 struct msm_file_private *ctx)
293{
294 struct drm_device *dev = gpu->dev;
295 struct msm_drm_private *priv = dev->dev_private;
296 int i, ret;
297
298 mutex_lock(&dev->struct_mutex);
299
300 submit->fence = ++priv->next_fence;
301
302 gpu->submitted_fence = submit->fence;
303
304 ret = gpu->funcs->submit(gpu, submit, ctx);
305 priv->lastctx = ctx;
306
307 for (i = 0; i < submit->nr_bos; i++) {
308 struct msm_gem_object *msm_obj = submit->bos[i].obj;
309
310 /* can't happen yet.. but when we add 2d support we'll have
311 * to deal w/ cross-ring synchronization:
312 */
313 WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
314
315 if (!is_active(msm_obj)) {
316 uint32_t iova;
317
318 /* ring takes a reference to the bo and iova: */
319 drm_gem_object_reference(&msm_obj->base);
320 msm_gem_get_iova_locked(&msm_obj->base,
321 submit->gpu->id, &iova);
322 }
323
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
325 }
326 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex);
328
329 return ret;
330}
331
332/*
333 * Init/Cleanup:
334 */
335
336static irqreturn_t irq_handler(int irq, void *data)
337{
338 struct msm_gpu *gpu = data;
339 return gpu->funcs->irq(gpu);
340}
341
342static const char *clk_names[] = {
343 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
344};
345
346int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
347 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
348 const char *name, const char *ioname, const char *irqname, int ringsz)
349{
350 int i, ret;
351
352 gpu->dev = drm;
353 gpu->funcs = funcs;
354 gpu->name = name;
355
356 INIT_LIST_HEAD(&gpu->active_list);
357 INIT_WORK(&gpu->retire_work, retire_worker);
358 INIT_WORK(&gpu->recover_work, recover_worker);
359
360 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
361 (unsigned long)gpu);
362
363 BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
364
365 /* Map registers: */
366 gpu->mmio = msm_ioremap(pdev, ioname, name);
367 if (IS_ERR(gpu->mmio)) {
368 ret = PTR_ERR(gpu->mmio);
369 goto fail;
370 }
371
372 /* Get Interrupt: */
373 gpu->irq = platform_get_irq_byname(pdev, irqname);
374 if (gpu->irq < 0) {
375 ret = gpu->irq;
376 dev_err(drm->dev, "failed to get irq: %d\n", ret);
377 goto fail;
378 }
379
380 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
381 IRQF_TRIGGER_HIGH, gpu->name, gpu);
382 if (ret) {
383 dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
384 goto fail;
385 }
386
387 /* Acquire clocks: */
388 for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
389 gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
390 DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
391 if (IS_ERR(gpu->grp_clks[i]))
392 gpu->grp_clks[i] = NULL;
393 }
394
395 gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
396 DBG("ebi1_clk: %p", gpu->ebi1_clk);
397 if (IS_ERR(gpu->ebi1_clk))
398 gpu->ebi1_clk = NULL;
399
400 /* Acquire regulators: */
401 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
402 DBG("gpu_reg: %p", gpu->gpu_reg);
403 if (IS_ERR(gpu->gpu_reg))
404 gpu->gpu_reg = NULL;
405
406 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
407 DBG("gpu_cx: %p", gpu->gpu_cx);
408 if (IS_ERR(gpu->gpu_cx))
409 gpu->gpu_cx = NULL;
410
411 /* Setup IOMMU.. eventually we will (I think) do this once per context
412 * and have separate page tables per context. For now, to keep things
413 * simple and to get something working, just use a single address space:
414 */
415 gpu->iommu = iommu_domain_alloc(&platform_bus_type);
416 if (!gpu->iommu) {
417 dev_err(drm->dev, "failed to allocate IOMMU\n");
418 ret = -ENOMEM;
419 goto fail;
420 }
421 gpu->id = msm_register_iommu(drm, gpu->iommu);
422
423 /* Create ringbuffer: */
424 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
425 if (IS_ERR(gpu->rb)) {
426 ret = PTR_ERR(gpu->rb);
427 gpu->rb = NULL;
428 dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
429 goto fail;
430 }
431
432 ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
433 if (ret) {
434 gpu->rb_iova = 0;
435 dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
436 goto fail;
437 }
438
439 bs_init(gpu, pdev);
440
441 return 0;
442
443fail:
444 return ret;
445}
446
447void msm_gpu_cleanup(struct msm_gpu *gpu)
448{
449 DBG("%s", gpu->name);
450
451 WARN_ON(!list_empty(&gpu->active_list));
452
453 bs_fini(gpu);
454
455 if (gpu->rb) {
456 if (gpu->rb_iova)
457 msm_gem_put_iova(gpu->rb->bo, gpu->id);
458 msm_ringbuffer_destroy(gpu->rb);
459 }
460
461 if (gpu->iommu)
462 iommu_domain_free(gpu->iommu);
463}
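The hangcheck decision in hangcheck_handler() above compares only two fence values each period. The standalone model below replays that logic with invented fence numbers to show when recovery would be scheduled versus when the timer is simply re-armed:

#include <stdio.h>
#include <stdint.h>

struct fake_gpu {
	uint32_t submitted_fence;	/* last fence handed to the ring */
	uint32_t hangcheck_fence;	/* completed fence seen at last check */
};

static void hangcheck(struct fake_gpu *gpu, uint32_t completed)
{
	if (completed != gpu->hangcheck_fence) {
		/* some progress since last check */
		gpu->hangcheck_fence = completed;
	} else if (completed < gpu->submitted_fence) {
		/* no progress and work still outstanding: hung */
		gpu->hangcheck_fence = completed;
		printf("hung: completed=%u submitted=%u -> schedule recover\n",
				completed, gpu->submitted_fence);
		return;
	}

	if (gpu->submitted_fence > gpu->hangcheck_fence)
		printf("still busy at fence %u, re-arm timer\n", completed);
}

int main(void)
{
	struct fake_gpu gpu = { .submitted_fence = 5, .hangcheck_fence = 0 };

	hangcheck(&gpu, 3);	/* progress: 3 of 5 retired, timer re-armed */
	hangcheck(&gpu, 3);	/* stalled at 3 with work pending -> recover */
	return 0;
}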
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 000000000000..8cd829e520bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_GPU_H__
19#define __MSM_GPU_H__
20
21#include <linux/clk.h>
22#include <linux/regulator/consumer.h>
23
24#include "msm_drv.h"
25#include "msm_ringbuffer.h"
26
27struct msm_gem_submit;
28
29/* So far, with hardware that I've seen to date, we can have:
30 * + zero, one, or two z180 2d cores
31 * + a3xx or a2xx 3d core, which share a common CP (the firmware
32 * for the CP seems to implement some different PM4 packet types
33 * but the basics of cmdstream submission are the same)
34 *
35 * Which means that the eventual complete "class" hierarchy, once
36 * support for all past and present hw is in place, becomes:
37 * + msm_gpu
38 * + adreno_gpu
39 * + a3xx_gpu
40 * + a2xx_gpu
41 * + z180_gpu
42 */
43struct msm_gpu_funcs {
44 int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
45 int (*hw_init)(struct msm_gpu *gpu);
46 int (*pm_suspend)(struct msm_gpu *gpu);
47 int (*pm_resume)(struct msm_gpu *gpu);
48 int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
49 struct msm_file_private *ctx);
50 void (*flush)(struct msm_gpu *gpu);
51 void (*idle)(struct msm_gpu *gpu);
52 irqreturn_t (*irq)(struct msm_gpu *irq);
53 uint32_t (*last_fence)(struct msm_gpu *gpu);
54 void (*recover)(struct msm_gpu *gpu);
55 void (*destroy)(struct msm_gpu *gpu);
56#ifdef CONFIG_DEBUG_FS
57 /* show GPU status in debugfs: */
58 void (*show)(struct msm_gpu *gpu, struct seq_file *m);
59#endif
60};
61
62struct msm_gpu {
63 const char *name;
64 struct drm_device *dev;
65 const struct msm_gpu_funcs *funcs;
66
67 struct msm_ringbuffer *rb;
68 uint32_t rb_iova;
69
70 /* list of GEM active objects: */
71 struct list_head active_list;
72
73 uint32_t submitted_fence;
74
75 /* worker for handling active-list retiring: */
76 struct work_struct retire_work;
77
78 void __iomem *mmio;
79 int irq;
80
81 struct iommu_domain *iommu;
82 int id;
83
84 /* Power Control: */
85 struct regulator *gpu_reg, *gpu_cx;
86 struct clk *ebi1_clk, *grp_clks[5];
87 uint32_t fast_rate, slow_rate, bus_freq;
88 uint32_t bsc;
89
90	/* Hang Detection: */
91#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
92#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
93 struct timer_list hangcheck_timer;
94 uint32_t hangcheck_fence;
95 struct work_struct recover_work;
96};
97
98static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
99{
100 msm_writel(data, gpu->mmio + (reg << 2));
101}
102
103static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
104{
105 return msm_readl(gpu->mmio + (reg << 2));
106}
107
108int msm_gpu_pm_suspend(struct msm_gpu *gpu);
109int msm_gpu_pm_resume(struct msm_gpu *gpu);
110
111void msm_gpu_retire(struct msm_gpu *gpu);
112int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
113 struct msm_file_private *ctx);
114
115int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
116 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
117 const char *name, const char *ioname, const char *irqname, int ringsz);
118void msm_gpu_cleanup(struct msm_gpu *gpu);
119
120struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
121void __init a3xx_register(void);
122void __exit a3xx_unregister(void);
123
124#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 000000000000..8171537dd7d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_ringbuffer.h"
19#include "msm_gpu.h"
20
21struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
22{
23 struct msm_ringbuffer *ring;
24 int ret;
25
26 size = ALIGN(size, 4); /* size should be dword aligned */
27
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29 if (!ring) {
30 ret = -ENOMEM;
31 goto fail;
32 }
33
34 ring->gpu = gpu;
35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
36 if (IS_ERR(ring->bo)) {
37 ret = PTR_ERR(ring->bo);
38 ring->bo = NULL;
39 goto fail;
40 }
41
42 ring->start = msm_gem_vaddr_locked(ring->bo);
43 ring->end = ring->start + (size / 4);
44 ring->cur = ring->start;
45
46 ring->size = size;
47
48 return ring;
49
50fail:
51 if (ring)
52 msm_ringbuffer_destroy(ring);
53 return ERR_PTR(ret);
54}
55
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{
58 if (ring->bo)
59 drm_gem_object_unreference(ring->bo);
60 kfree(ring);
61}
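
Because msm_ringbuffer_new() reports failure with ERR_PTR() rather than NULL, a caller has to translate the error back with PTR_ERR() and must not hand an ERR_PTR to msm_ringbuffer_destroy(). A minimal sketch of the expected caller-side pattern (the natural call site is msm_gpu_init(), which takes the ringsz parameter declared in msm_gpu.h; the error label here is assumed):

	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;		/* don't destroy an ERR_PTR later */
		goto fail;
	}
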
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 000000000000..6e0e1049fa4f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_RINGBUFFER_H__
19#define __MSM_RINGBUFFER_H__
20
21#include "msm_drv.h"
22
23struct msm_ringbuffer {
24 struct msm_gpu *gpu;
25 int size;
26 struct drm_gem_object *bo;
27 uint32_t *start, *end, *cur;
28};
29
30struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
31void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
32
33/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
34
35static inline void
36OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
37{
38 if (ring->cur == ring->end)
39 ring->cur = ring->start;
40 *(ring->cur++) = data;
41}
42
43#endif /* __MSM_RINGBUFFER_H__ */
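
OUT_RING() is the one primitive shared by all backends: it writes a single dword at the current position and wraps back to the start when the end of the buffer is reached; advancing the hardware write pointer is the backend's job (in its flush() hook). As an illustration, a PM4 type-3 packet could be emitted with a helper like the one below; the header encoding shown (packet type in bits 31:30, dword count minus one in bits 29:16, opcode in bits 15:8) is the conventional adreno PM4 layout and is an assumption of this sketch, not something defined in this file:

static inline void emit_pkt3(struct msm_ringbuffer *ring,
		uint8_t opcode, uint16_t cnt)
{
	OUT_RING(ring, 0xc0000000 | (((uint32_t)cnt - 1) << 16) |
			((uint32_t)(opcode & 0xff) << 8));
	/* caller follows up with 'cnt' OUT_RING() calls for the payload */
}
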
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 119487e05e65..2d9a25daab05 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -16,3 +16,4 @@ header-y += sis_drm.h
16header-y += tegra_drm.h 16header-y += tegra_drm.h
17header-y += via_drm.h 17header-y += via_drm.h
18header-y += vmwgfx_drm.h 18header-y += vmwgfx_drm.h
19header-y += msm_drm.h
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
new file mode 100644
index 000000000000..d3c62074016d
--- /dev/null
+++ b/include/uapi/drm/msm_drm.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __MSM_DRM_H__
19#define __MSM_DRM_H__
20
21#include <stddef.h>
22#include <drm/drm.h>
23
24/* Please note that modifications to all structs defined here are
25 * subject to backwards-compatibility constraints:
26 * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
27 * user/kernel compatibility
28 * 2) Keep fields aligned to their size
29 * 3) Because of how drm_ioctl() works, we can add new fields at
30 * the end of an ioctl if some care is taken: drm_ioctl() will
31 * zero out the new fields at the tail of the ioctl, so a zero
32 * value should have a backwards compatible meaning. And for
33 * output params, userspace won't see the newly added output
34 * fields, which has to be acceptable.
35 */
36
37#define MSM_PIPE_NONE 0x00
38#define MSM_PIPE_2D0 0x01
39#define MSM_PIPE_2D1 0x02
40#define MSM_PIPE_3D0 0x10
41
42/* timeouts are specified in clock-monotonic absolute times (to simplify
43 * restarting interrupted ioctls). The following struct is logically the
44 * same as 'struct timespec' but 32/64b ABI safe.
45 */
46struct drm_msm_timespec {
47 int64_t tv_sec; /* seconds */
48 int64_t tv_nsec; /* nanoseconds */
49};
50
51#define MSM_PARAM_GPU_ID 0x01
52#define MSM_PARAM_GMEM_SIZE 0x02
53
54struct drm_msm_param {
55 uint32_t pipe; /* in, MSM_PIPE_x */
56 uint32_t param; /* in, MSM_PARAM_x */
57 uint64_t value; /* out (get_param) or in (set_param) */
58};
59
60/*
61 * GEM buffers:
62 */
63
64#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
65#define MSM_BO_GPU_READONLY 0x00000002
66#define MSM_BO_CACHE_MASK 0x000f0000
67/* cache modes */
68#define MSM_BO_CACHED 0x00010000
69#define MSM_BO_WC 0x00020000
70#define MSM_BO_UNCACHED 0x00040000
71
72struct drm_msm_gem_new {
73 uint64_t size; /* in */
74 uint32_t flags; /* in, mask of MSM_BO_x */
75 uint32_t handle; /* out */
76};
77
78struct drm_msm_gem_info {
79 uint32_t handle; /* in */
80 uint32_t pad;
81 uint64_t offset; /* out, offset to pass to mmap() */
82};
83
84#define MSM_PREP_READ 0x01
85#define MSM_PREP_WRITE 0x02
86#define MSM_PREP_NOSYNC 0x04
87
88struct drm_msm_gem_cpu_prep {
89 uint32_t handle; /* in */
90 uint32_t op; /* in, mask of MSM_PREP_x */
91 struct drm_msm_timespec timeout; /* in */
92};
93
94struct drm_msm_gem_cpu_fini {
95 uint32_t handle; /* in */
96};
97
98/*
99 * Cmdstream Submission:
100 */
101
102/* The value written into the cmdstream is logically:
103 *
104 * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
105 *
106 * When we have GPUs with >32-bit pointers, it should be possible to deal
107 * with this by emit'ing two reloc entries with appropriate shift
108 * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
109 *
110 * NOTE that relocs must be sorted in order of increasing submit_offset,
111 * otherwise EINVAL.
112 */
113struct drm_msm_gem_submit_reloc {
114 uint32_t submit_offset; /* in, offset from submit_bo */
115 uint32_t or; /* in, value OR'd with result */
116 int32_t shift; /* in, amount of left shift (can be negative) */
117 uint32_t reloc_idx; /* in, index of reloc_bo buffer */
118 uint64_t reloc_offset; /* in, offset from start of reloc_bo */
119};
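
To make the formula above concrete, the kernel-side patching boils down to a few lines of arithmetic per reloc entry; the snippet below is an editor's sketch of that computation (variable names assumed, not quoted from the patch), and also shows why a negative shift is enough to cope with >32-bit GPU addresses later on:

	uint64_t iova = reloc_bo_gpuaddr + r->reloc_offset;

	if (r->shift < 0)
		iova >>= -r->shift;
	else
		iova <<= r->shift;

	cmdstream[r->submit_offset / 4] = iova | r->or;	/* low 32 bits */

	/* e.g. a 64-bit address could be written as two relocs at adjacent
	 * submit_offsets, one with shift = 0 (low dword) and one with
	 * shift = -32 (high dword). */
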
120
121/* submit-types:
122 * BUF - this cmd buffer is executed normally.
123 * IB_TARGET_BUF - this cmd buffer is an IB target. Relocs are
124 * processed normally, but the kernel does not setup an IB to
125 * this buffer in the first-level ringbuffer
126 * CTX_RESTORE_BUF - only executed if there has been a GPU context
127 * switch since the last SUBMIT ioctl
128 */
129#define MSM_SUBMIT_CMD_BUF 0x0001
130#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
131#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
132struct drm_msm_gem_submit_cmd {
133 uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
134 uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
135 uint32_t submit_offset; /* in, offset into submit_bo */
136 uint32_t size; /* in, cmdstream size */
137 uint32_t pad;
138 uint32_t nr_relocs; /* in, number of submit_reloc's */
139 uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
140};
141
142/* Each buffer referenced elsewhere in the cmdstream submit (i.e. the
143 * cmdstream buffer(s) themselves or reloc entries) has one (and only
144 * one) entry in the submit->bos[] table.
145 *
146 * As an optimization, the current buffer (gpu virtual address) can be
147 * passed back through the 'presumed' field. If on a subsequent reloc,
148 * userspace passes back a 'presumed' address that is still valid,
149 * then patching the cmdstream for this entry is skipped. This can
150 * avoid the kernel needing to map/access the cmdstream bo in the common
151 * case.
152 */
153#define MSM_SUBMIT_BO_READ 0x0001
154#define MSM_SUBMIT_BO_WRITE 0x0002
155struct drm_msm_gem_submit_bo {
156 uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
157 uint32_t handle; /* in, GEM handle */
158 uint64_t presumed; /* in/out, presumed buffer address */
159};
160
161/* Each cmdstream submit consists of a table of buffers involved, and
162 * one or more cmdstream buffers. This allows for conditional execution
163 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
164 */
165struct drm_msm_gem_submit {
166 uint32_t pipe; /* in, MSM_PIPE_x */
167 uint32_t fence; /* out */
168 uint32_t nr_bos; /* in, number of submit_bo's */
169 uint32_t nr_cmds; /* in, number of submit_cmd's */
170 uint64_t __user bos; /* in, ptr to array of submit_bo's */
171 uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
172};
173
174/* The normal way to synchronize with the GPU is just to CPU_PREP on
175 * a buffer if you need to access it from the CPU (other cmdstream
176 * submissions from the same or other contexts, PAGE_FLIP ioctl, etc, all
177 * handle the required synchronization under the hood). This ioctl
178 * mainly just exists as a way to implement the gallium pipe_fence
179 * APIs without requiring a dummy bo to synchronize on.
180 */
181struct drm_msm_wait_fence {
182 uint32_t fence; /* in */
183 uint32_t pad;
184 struct drm_msm_timespec timeout; /* in */
185};
186
187#define DRM_MSM_GET_PARAM 0x00
188/* placeholder:
189#define DRM_MSM_SET_PARAM 0x01
190 */
191#define DRM_MSM_GEM_NEW 0x02
192#define DRM_MSM_GEM_INFO 0x03
193#define DRM_MSM_GEM_CPU_PREP 0x04
194#define DRM_MSM_GEM_CPU_FINI 0x05
195#define DRM_MSM_GEM_SUBMIT 0x06
196#define DRM_MSM_WAIT_FENCE 0x07
197#define DRM_MSM_NUM_IOCTLS 0x08
198
199#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
200#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
201#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
202#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
203#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
204#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
205#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
206
207#endif /* __MSM_DRM_H__ */
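
Putting the pieces above together, a cmdstream submission from userspace amounts to: fill in a bos[] table, point one or more submit_cmd entries at offsets within those buffers, fire DRM_MSM_GEM_SUBMIT, then wait on the returned fence with an absolute CLOCK_MONOTONIC timeout. The sketch below is hypothetical and heavily simplified (real userspace goes through libdrm/libdrm_freedreno, and the include path for msm_drm.h depends on how headers are installed); it assumes 'handle' is a GEM handle whose first 'size' bytes already contain valid cmdstream:

#include <stdint.h>
#include <time.h>
#include <xf86drm.h>		/* drmCommandWrite{,Read}() from libdrm */
#include "msm_drm.h"		/* this header; install path may vary */

static int submit_and_wait(int fd, uint32_t handle, uint32_t size)
{
	struct drm_msm_gem_submit_bo bo = {
		.flags  = MSM_SUBMIT_BO_READ,	/* GPU only reads the cmdstream */
		.handle = handle,
	};
	struct drm_msm_gem_submit_cmd cmd = {
		.type          = MSM_SUBMIT_CMD_BUF,
		.submit_idx    = 0,		/* index into the bos[] table */
		.submit_offset = 0,
		.size          = size,		/* cmdstream size in bytes */
	};
	struct drm_msm_gem_submit req = {
		.pipe    = MSM_PIPE_3D0,
		.nr_bos  = 1,
		.nr_cmds = 1,
		.bos     = (uintptr_t)&bo,
		.cmds    = (uintptr_t)&cmd,
	};
	struct drm_msm_wait_fence wait = { 0 };
	struct timespec now;
	int ret;

	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
	if (ret)
		return ret;

	/* timeouts are absolute clock-monotonic times: "1 second from now" */
	clock_gettime(CLOCK_MONOTONIC, &now);
	wait.fence           = req.fence;
	wait.timeout.tv_sec  = now.tv_sec + 1;
	wait.timeout.tv_nsec = now.tv_nsec;

	return drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &wait, sizeof(wait));
}
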