Diffstat:
-rw-r--r--  drivers/gpu/drm/Makefile                          1
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig                   13
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                   9
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h            1793
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h             89
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h           201
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h              1346
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_types.h              45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c          229
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c             735
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h             511
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c         516
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c              742
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c            521
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c             213
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c            81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c             295
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c             872
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h             102
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c             516
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c         634
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h              57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c       1192
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c         99
-rw-r--r--  drivers/staging/Kconfig                           2
-rw-r--r--  include/drm/Kbuild                                1
-rw-r--r--  include/drm/ttm/ttm_object.h                      6
-rw-r--r--  include/drm/vmwgfx_drm.h                        574
28 files changed, 11394 insertions, 1 deletion
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 470ef6779db3..39c5aa75b8f1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915)  += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)   +=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-y                   += i2c/
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 000000000000..f20b8bcbef39
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI
4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT
8 select DRM_TTM
9 help
 10          KMS-enabled DRM driver for SVGA2 virtual hardware.
 11
 12          If unsure, say n. The compiled module will be
 13          called vmwgfx.ko.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 000000000000..1a3cb6816d1c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
1
2ccflags-y := -Iinclude/drm
3
4vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o
8
9obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 000000000000..77cb45331000
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3D hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#include "svga_reg.h"
36
37
38/*
39 * 3D Hardware Version
40 *
41 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
 42 * register. It is set by the host and read by the guest. This lets
43 * us make new guest drivers which are backwards-compatible with old
44 * SVGA hardware revisions. It does not let us support old guest
45 * drivers. Good enough for now.
46 *
47 */
48
49#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
50#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
51#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
52
53typedef enum {
54 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
55 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
56 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
57 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
58 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
59 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
60 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
61} SVGA3dHardwareVersion;
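/*
 * Editor's illustrative sketch, not part of the original patch:
 * SVGA3D_MAKE_HWVERSION packs the major revision into the high 16 bits,
 * so a plain integer comparison orders hardware versions correctly. A
 * guest driver might validate the value it reads from the
 * SVGA_FIFO_3D_HWVERSION register like this (the helper name is
 * hypothetical):
 */
static inline uint32
svga3d_example_hwversion_at_least(uint32 hwversion,
                                  SVGA3dHardwareVersion required)
{
   return hwversion >= (uint32)required;
}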
62
63/*
64 * Generic Types
65 */
66
67typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
68#define SVGA3D_NUM_CLIPPLANES 6
69#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
70
71
72/*
73 * Surface formats.
74 *
75 * If you modify this list, be sure to keep GLUtil.c in sync. It
76 * includes the internal format definition of each surface in
77 * GLUtil_ConvertSurfaceFormat, and it contains a table of
78 * human-readable names in GLUtil_GetFormatName.
79 */
80
81typedef enum SVGA3dSurfaceFormat {
82 SVGA3D_FORMAT_INVALID = 0,
83
84 SVGA3D_X8R8G8B8 = 1,
85 SVGA3D_A8R8G8B8 = 2,
86
87 SVGA3D_R5G6B5 = 3,
88 SVGA3D_X1R5G5B5 = 4,
89 SVGA3D_A1R5G5B5 = 5,
90 SVGA3D_A4R4G4B4 = 6,
91
92 SVGA3D_Z_D32 = 7,
93 SVGA3D_Z_D16 = 8,
94 SVGA3D_Z_D24S8 = 9,
95 SVGA3D_Z_D15S1 = 10,
96
97 SVGA3D_LUMINANCE8 = 11,
98 SVGA3D_LUMINANCE4_ALPHA4 = 12,
99 SVGA3D_LUMINANCE16 = 13,
100 SVGA3D_LUMINANCE8_ALPHA8 = 14,
101
102 SVGA3D_DXT1 = 15,
103 SVGA3D_DXT2 = 16,
104 SVGA3D_DXT3 = 17,
105 SVGA3D_DXT4 = 18,
106 SVGA3D_DXT5 = 19,
107
108 SVGA3D_BUMPU8V8 = 20,
109 SVGA3D_BUMPL6V5U5 = 21,
110 SVGA3D_BUMPX8L8V8U8 = 22,
111 SVGA3D_BUMPL8V8U8 = 23,
112
113 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
114 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
115
116 SVGA3D_A2R10G10B10 = 26,
117
118 /* signed formats */
119 SVGA3D_V8U8 = 27,
120 SVGA3D_Q8W8V8U8 = 28,
121 SVGA3D_CxV8U8 = 29,
122
123 /* mixed formats */
124 SVGA3D_X8L8V8U8 = 30,
125 SVGA3D_A2W10V10U10 = 31,
126
127 SVGA3D_ALPHA8 = 32,
128
129 /* Single- and dual-component floating point formats */
130 SVGA3D_R_S10E5 = 33,
131 SVGA3D_R_S23E8 = 34,
132 SVGA3D_RG_S10E5 = 35,
133 SVGA3D_RG_S23E8 = 36,
134
135 /*
136 * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
137 * the most efficient format to use when creating new surfaces
138 * expressly for index or vertex data.
139 */
140 SVGA3D_BUFFER = 37,
141
142 SVGA3D_Z_D24X8 = 38,
143
144 SVGA3D_V16U16 = 39,
145
146 SVGA3D_G16R16 = 40,
147 SVGA3D_A16B16G16R16 = 41,
148
149 /* Packed Video formats */
150 SVGA3D_UYVY = 42,
151 SVGA3D_YUY2 = 43,
152
153 SVGA3D_FORMAT_MAX
154} SVGA3dSurfaceFormat;
155
156typedef uint32 SVGA3dColor; /* a, r, g, b */
157
158/*
159 * These match the D3DFORMAT_OP definitions used by Direct3D. We need
160 * them so that we can query the host for what the supported surface
161 * operations are (when we're using the D3D backend, in particular),
162 * and so we can send those operations to the guest.
163 */
164typedef enum {
165 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
166 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
167 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
168 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
169 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
170 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
171 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
172
173/*
174 * This format can be used as a render target if the current display mode
 175 * is the same depth, provided the alpha channel is ignored. E.g. if the device
176 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
177 * format op list entry for A8R8G8B8 should have this cap.
178 */
179 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
180
181/*
182 * This format contains DirectDraw support (including Flip). This flag
 183 * should not be set on alpha formats.
184 */
185 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
186
187/*
 188 * The rasterizer can provide some level of Direct3D support for this format,
 189 * which implies that the driver can create a Context in this mode (for some
190 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
191 * flag must also be set.
192 */
193 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
194
195/*
196 * This is set for a private format when the driver has put the bpp in
197 * the structure.
198 */
199 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
200
201/*
202 * Indicates that this format can be converted to any RGB format for which
203 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
204 */
205 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
206
207/*
208 * Indicates that this format can be used to create offscreen plain surfaces.
209 */
210 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
211
212/*
 213 * Indicates that this format can be read as an SRGB texture (meaning that the
214 * sampler will linearize the looked up data)
215 */
216 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
217
218/*
219 * Indicates that this format can be used in the bumpmap instructions
220 */
221 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
222
223/*
224 * Indicates that this format can be sampled by the displacement map sampler
225 */
226 SVGA3DFORMAT_OP_DMAP = 0x00020000,
227
228/*
229 * Indicates that this format cannot be used with texture filtering
230 */
231 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
232
233/*
234 * Indicates that format conversions are supported to this RGB format if
235 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
236 */
237 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
238
239/*
 240 * Indicates that this format can be written as an SRGB target (meaning that the
 241 * pixel pipe will de-linearize data on output to this format)
242 */
243 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
244
245/*
246 * Indicates that this format cannot be used with alpha blending
247 */
248 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
249
250/*
 251 * Indicates that the device can auto-generate sublevels for resources
252 * of this format
253 */
254 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
255
256/*
 257 * Indicates that this format can be used by the vertex texture sampler
258 */
259 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
260
261/*
262 * Indicates that this format supports neither texture coordinate wrap
263 * modes, nor mipmapping
264 */
265 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
266} SVGA3dFormatOp;
267
268/*
269 * This structure is a conversion of SVGA3DFORMAT_OP_*.
270 * Entries must be located at the same position.
271 */
272typedef union {
273 uint32 value;
274 struct {
275 uint32 texture : 1;
276 uint32 volumeTexture : 1;
277 uint32 cubeTexture : 1;
278 uint32 offscreenRenderTarget : 1;
279 uint32 sameFormatRenderTarget : 1;
280 uint32 unknown1 : 1;
281 uint32 zStencil : 1;
282 uint32 zStencilArbitraryDepth : 1;
283 uint32 sameFormatUpToAlpha : 1;
284 uint32 unknown2 : 1;
285 uint32 displayMode : 1;
286 uint32 acceleration3d : 1;
287 uint32 pixelSize : 1;
288 uint32 convertToARGB : 1;
289 uint32 offscreenPlain : 1;
290 uint32 sRGBRead : 1;
291 uint32 bumpMap : 1;
292 uint32 dmap : 1;
293 uint32 noFilter : 1;
294 uint32 memberOfGroupARGB : 1;
295 uint32 sRGBWrite : 1;
296 uint32 noAlphaBlend : 1;
297 uint32 autoGenMipMap : 1;
298 uint32 vertexTexture : 1;
299 uint32 noTexCoordWrapNorMip : 1;
300 };
301} SVGA3dSurfaceFormatCaps;
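/*
 * Editor's sketch (hypothetical helper, not from the original patch):
 * because the bit-field layout above mirrors the SVGA3DFORMAT_OP_*
 * values bit-for-bit, a raw capability mask can be loaded through
 * 'value' and read back through the named fields.
 */
static inline uint32
svga3d_example_mask_is_texture(uint32 mask)
{
   SVGA3dSurfaceFormatCaps caps;
   caps.value = mask;   /* e.g. SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE */
   return caps.texture; /* same bit as SVGA3DFORMAT_OP_TEXTURE */
}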
302
303/*
304 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
305 * must fit in a uint32.
306 */
307
308typedef enum {
309 SVGA3D_RS_INVALID = 0,
310 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
311 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
312 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
313 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
314 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
315 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
316 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
317 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
318 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
319 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
320 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
321 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
322 SVGA3D_RS_STENCILREF = 13, /* uint32 */
323 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
324 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
325 SVGA3D_RS_FOGSTART = 16, /* float */
326 SVGA3D_RS_FOGEND = 17, /* float */
327 SVGA3D_RS_FOGDENSITY = 18, /* float */
328 SVGA3D_RS_POINTSIZE = 19, /* float */
329 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
330 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
331 SVGA3D_RS_POINTSCALE_A = 22, /* float */
332 SVGA3D_RS_POINTSCALE_B = 23, /* float */
333 SVGA3D_RS_POINTSCALE_C = 24, /* float */
334 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
335 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
336 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
337 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
338 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
339 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
340 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
341 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
342 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
343 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
344 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
345 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
346 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
347 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
348 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
349 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
350 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
351 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
352 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
353 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
354 SVGA3D_RS_ZBIAS = 45, /* float */
355 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
356 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
357 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
358 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
359 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
360 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
361 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
362 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
363 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
364 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
365 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
366 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
367 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
368 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
369 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
370 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
371 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
372 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
373 SVGA3D_RS_DEPTHBIAS = 64, /* float */
374
375
376 /*
377 * Output Gamma Level
378 *
 379 * Output gamma affects the gamma curve of colors that are output from the
380 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
381 * value is <= 0.0, gamma correction is ignored and linear color space is
382 * used.
383 */
384
385 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
386 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
387 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
388 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
389 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
390 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
391 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
392 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
393 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
394 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
395 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
396 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
397 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
398 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
399 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
400 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
401 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
402 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
403 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
404 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
405 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
406 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
407 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
408 SVGA3D_RS_TWEENFACTOR = 88, /* float */
409 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
410 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
411 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
412 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
413 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
414 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
415 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
416 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
417 SVGA3D_RS_MAX
418} SVGA3dRenderStateName;
419
420typedef enum {
421 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
422 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
423 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
424} SVGA3dVertexMaterial;
425
426typedef enum {
427 SVGA3D_FILLMODE_INVALID = 0,
428 SVGA3D_FILLMODE_POINT = 1,
429 SVGA3D_FILLMODE_LINE = 2,
430 SVGA3D_FILLMODE_FILL = 3,
431 SVGA3D_FILLMODE_MAX
432} SVGA3dFillModeType;
433
434
435typedef
436union {
437 struct {
438 uint16 mode; /* SVGA3dFillModeType */
439 uint16 face; /* SVGA3dFace */
440 };
441 uint32 uintValue;
442} SVGA3dFillMode;
443
444typedef enum {
445 SVGA3D_SHADEMODE_INVALID = 0,
446 SVGA3D_SHADEMODE_FLAT = 1,
447 SVGA3D_SHADEMODE_SMOOTH = 2,
448 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
449 SVGA3D_SHADEMODE_MAX
450} SVGA3dShadeMode;
451
452typedef
453union {
454 struct {
455 uint16 repeat;
456 uint16 pattern;
457 };
458 uint32 uintValue;
459} SVGA3dLinePattern;
460
461typedef enum {
462 SVGA3D_BLENDOP_INVALID = 0,
463 SVGA3D_BLENDOP_ZERO = 1,
464 SVGA3D_BLENDOP_ONE = 2,
465 SVGA3D_BLENDOP_SRCCOLOR = 3,
466 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
467 SVGA3D_BLENDOP_SRCALPHA = 5,
468 SVGA3D_BLENDOP_INVSRCALPHA = 6,
469 SVGA3D_BLENDOP_DESTALPHA = 7,
470 SVGA3D_BLENDOP_INVDESTALPHA = 8,
471 SVGA3D_BLENDOP_DESTCOLOR = 9,
472 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
473 SVGA3D_BLENDOP_SRCALPHASAT = 11,
474 SVGA3D_BLENDOP_BLENDFACTOR = 12,
475 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
476 SVGA3D_BLENDOP_MAX
477} SVGA3dBlendOp;
478
479typedef enum {
480 SVGA3D_BLENDEQ_INVALID = 0,
481 SVGA3D_BLENDEQ_ADD = 1,
482 SVGA3D_BLENDEQ_SUBTRACT = 2,
483 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
484 SVGA3D_BLENDEQ_MINIMUM = 4,
485 SVGA3D_BLENDEQ_MAXIMUM = 5,
486 SVGA3D_BLENDEQ_MAX
487} SVGA3dBlendEquation;
488
489typedef enum {
490 SVGA3D_FRONTWINDING_INVALID = 0,
491 SVGA3D_FRONTWINDING_CW = 1,
492 SVGA3D_FRONTWINDING_CCW = 2,
493 SVGA3D_FRONTWINDING_MAX
494} SVGA3dFrontWinding;
495
496typedef enum {
497 SVGA3D_FACE_INVALID = 0,
498 SVGA3D_FACE_NONE = 1,
499 SVGA3D_FACE_FRONT = 2,
500 SVGA3D_FACE_BACK = 3,
501 SVGA3D_FACE_FRONT_BACK = 4,
502 SVGA3D_FACE_MAX
503} SVGA3dFace;
504
505/*
506 * The order and the values should not be changed
507 */
508
509typedef enum {
510 SVGA3D_CMP_INVALID = 0,
511 SVGA3D_CMP_NEVER = 1,
512 SVGA3D_CMP_LESS = 2,
513 SVGA3D_CMP_EQUAL = 3,
514 SVGA3D_CMP_LESSEQUAL = 4,
515 SVGA3D_CMP_GREATER = 5,
516 SVGA3D_CMP_NOTEQUAL = 6,
517 SVGA3D_CMP_GREATEREQUAL = 7,
518 SVGA3D_CMP_ALWAYS = 8,
519 SVGA3D_CMP_MAX
520} SVGA3dCmpFunc;
521
522/*
523 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
524 * the fog factor to be specified in the alpha component of the specular
525 * (a.k.a. secondary) vertex color.
526 */
527typedef enum {
528 SVGA3D_FOGFUNC_INVALID = 0,
529 SVGA3D_FOGFUNC_EXP = 1,
530 SVGA3D_FOGFUNC_EXP2 = 2,
531 SVGA3D_FOGFUNC_LINEAR = 3,
532 SVGA3D_FOGFUNC_PER_VERTEX = 4
533} SVGA3dFogFunction;
534
535/*
536 * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
537 * or per-pixel basis.
538 */
539typedef enum {
540 SVGA3D_FOGTYPE_INVALID = 0,
541 SVGA3D_FOGTYPE_VERTEX = 1,
542 SVGA3D_FOGTYPE_PIXEL = 2,
543 SVGA3D_FOGTYPE_MAX = 3
544} SVGA3dFogType;
545
546/*
547 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
548 * computed using the eye Z value of each pixel (or vertex), whereas range-
549 * based fog is computed using the actual distance (range) to the eye.
550 */
551typedef enum {
552 SVGA3D_FOGBASE_INVALID = 0,
553 SVGA3D_FOGBASE_DEPTHBASED = 1,
554 SVGA3D_FOGBASE_RANGEBASED = 2,
555 SVGA3D_FOGBASE_MAX = 3
556} SVGA3dFogBase;
557
558typedef enum {
559 SVGA3D_STENCILOP_INVALID = 0,
560 SVGA3D_STENCILOP_KEEP = 1,
561 SVGA3D_STENCILOP_ZERO = 2,
562 SVGA3D_STENCILOP_REPLACE = 3,
563 SVGA3D_STENCILOP_INCRSAT = 4,
564 SVGA3D_STENCILOP_DECRSAT = 5,
565 SVGA3D_STENCILOP_INVERT = 6,
566 SVGA3D_STENCILOP_INCR = 7,
567 SVGA3D_STENCILOP_DECR = 8,
568 SVGA3D_STENCILOP_MAX
569} SVGA3dStencilOp;
570
571typedef enum {
572 SVGA3D_CLIPPLANE_0 = (1 << 0),
573 SVGA3D_CLIPPLANE_1 = (1 << 1),
574 SVGA3D_CLIPPLANE_2 = (1 << 2),
575 SVGA3D_CLIPPLANE_3 = (1 << 3),
576 SVGA3D_CLIPPLANE_4 = (1 << 4),
577 SVGA3D_CLIPPLANE_5 = (1 << 5),
578} SVGA3dClipPlanes;
579
580typedef enum {
581 SVGA3D_CLEAR_COLOR = 0x1,
582 SVGA3D_CLEAR_DEPTH = 0x2,
583 SVGA3D_CLEAR_STENCIL = 0x4
584} SVGA3dClearFlag;
585
586typedef enum {
587 SVGA3D_RT_DEPTH = 0,
588 SVGA3D_RT_STENCIL = 1,
589 SVGA3D_RT_COLOR0 = 2,
590 SVGA3D_RT_COLOR1 = 3,
591 SVGA3D_RT_COLOR2 = 4,
592 SVGA3D_RT_COLOR3 = 5,
593 SVGA3D_RT_COLOR4 = 6,
594 SVGA3D_RT_COLOR5 = 7,
595 SVGA3D_RT_COLOR6 = 8,
596 SVGA3D_RT_COLOR7 = 9,
597 SVGA3D_RT_MAX,
598 SVGA3D_RT_INVALID = ((uint32)-1),
599} SVGA3dRenderTargetType;
600
601#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
602
603typedef
604union {
605 struct {
606 uint32 red : 1;
607 uint32 green : 1;
608 uint32 blue : 1;
609 uint32 alpha : 1;
610 };
611 uint32 uintValue;
612} SVGA3dColorMask;
613
614typedef enum {
615 SVGA3D_VBLEND_DISABLE = 0,
616 SVGA3D_VBLEND_1WEIGHT = 1,
617 SVGA3D_VBLEND_2WEIGHT = 2,
618 SVGA3D_VBLEND_3WEIGHT = 3,
619} SVGA3dVertexBlendFlags;
620
621typedef enum {
622 SVGA3D_WRAPCOORD_0 = 1 << 0,
623 SVGA3D_WRAPCOORD_1 = 1 << 1,
624 SVGA3D_WRAPCOORD_2 = 1 << 2,
625 SVGA3D_WRAPCOORD_3 = 1 << 3,
626 SVGA3D_WRAPCOORD_ALL = 0xF,
627} SVGA3dWrapFlags;
628
629/*
630 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
631 * must fit in a uint32.
632 */
633
634typedef enum {
635 SVGA3D_TS_INVALID = 0,
636 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
637 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
638 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
639 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
640 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
641 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
642 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
643 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
644 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
645 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
646 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
647 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
648 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
649 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
650 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
651 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
652 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
653 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
654 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
655 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
656 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
657 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
658 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
659 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
660
661
662 /*
663 * Sampler Gamma Level
664 *
 665 * Sampler gamma affects the color of samples taken from the sampler. A
666 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
667 * gamma value is ignored and a linear space is used.
668 */
669
670 SVGA3D_TS_GAMMA = 25, /* float */
671 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
672 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
673 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
674 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
675 SVGA3D_TS_MAX
676} SVGA3dTextureStateName;
677
678typedef enum {
679 SVGA3D_TC_INVALID = 0,
680 SVGA3D_TC_DISABLE = 1,
681 SVGA3D_TC_SELECTARG1 = 2,
682 SVGA3D_TC_SELECTARG2 = 3,
683 SVGA3D_TC_MODULATE = 4,
684 SVGA3D_TC_ADD = 5,
685 SVGA3D_TC_ADDSIGNED = 6,
686 SVGA3D_TC_SUBTRACT = 7,
687 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
688 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
689 SVGA3D_TC_BLENDCURRENTALPHA = 10,
690 SVGA3D_TC_BLENDFACTORALPHA = 11,
691 SVGA3D_TC_MODULATE2X = 12,
692 SVGA3D_TC_MODULATE4X = 13,
693 SVGA3D_TC_DSDT = 14,
694 SVGA3D_TC_DOTPRODUCT3 = 15,
695 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
696 SVGA3D_TC_ADDSIGNED2X = 17,
697 SVGA3D_TC_ADDSMOOTH = 18,
698 SVGA3D_TC_PREMODULATE = 19,
699 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
700 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
701 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
702 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
703 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
704 SVGA3D_TC_MULTIPLYADD = 25,
705 SVGA3D_TC_LERP = 26,
706 SVGA3D_TC_MAX
707} SVGA3dTextureCombiner;
708
709#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
710
711typedef enum {
712 SVGA3D_TEX_ADDRESS_INVALID = 0,
713 SVGA3D_TEX_ADDRESS_WRAP = 1,
714 SVGA3D_TEX_ADDRESS_MIRROR = 2,
715 SVGA3D_TEX_ADDRESS_CLAMP = 3,
716 SVGA3D_TEX_ADDRESS_BORDER = 4,
717 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
718 SVGA3D_TEX_ADDRESS_EDGE = 6,
719 SVGA3D_TEX_ADDRESS_MAX
720} SVGA3dTextureAddress;
721
722/*
723 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
724 * disabled, and the rasterizer should use the magnification filter instead.
725 */
726typedef enum {
727 SVGA3D_TEX_FILTER_NONE = 0,
728 SVGA3D_TEX_FILTER_NEAREST = 1,
729 SVGA3D_TEX_FILTER_LINEAR = 2,
730 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
731 SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
732 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
733 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
734 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
735 SVGA3D_TEX_FILTER_MAX
736} SVGA3dTextureFilter;
737
738typedef enum {
739 SVGA3D_TEX_TRANSFORM_OFF = 0,
740 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
741 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
742 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
743 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
744 SVGA3D_TEX_PROJECTED = (1 << 15),
745} SVGA3dTexTransformFlags;
746
747typedef enum {
748 SVGA3D_TEXCOORD_GEN_OFF = 0,
749 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
750 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
751 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
752 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
753 SVGA3D_TEXCOORD_GEN_MAX
754} SVGA3dTextureCoordGen;
755
756/*
757 * Texture argument constants for texture combiner
758 */
759typedef enum {
760 SVGA3D_TA_INVALID = 0,
761 SVGA3D_TA_CONSTANT = 1,
762 SVGA3D_TA_PREVIOUS = 2,
763 SVGA3D_TA_DIFFUSE = 3,
764 SVGA3D_TA_TEXTURE = 4,
765 SVGA3D_TA_SPECULAR = 5,
766 SVGA3D_TA_MAX
767} SVGA3dTextureArgData;
768
769#define SVGA3D_TM_MASK_LEN 4
770
771/* Modifiers for texture argument constants defined above. */
772typedef enum {
773 SVGA3D_TM_NONE = 0,
774 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
775 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
776} SVGA3dTextureArgModifier;
777
778#define SVGA3D_INVALID_ID ((uint32)-1)
779#define SVGA3D_MAX_CLIP_PLANES 6
780
781/*
782 * This is the limit to the number of fixed-function texture
783 * transforms and texture coordinates we can support. It does *not*
784 * correspond to the number of texture image units (samplers) we
785 * support!
786 */
787#define SVGA3D_MAX_TEXTURE_COORDS 8
788
789/*
790 * Vertex declarations
791 *
792 * Notes:
793 *
794 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
795 * draw with any POSITIONT vertex arrays, the programmable vertex
796 * pipeline will be implicitly disabled. Drawing will take place as if
797 * no vertex shader was bound.
798 */
799
800typedef enum {
801 SVGA3D_DECLUSAGE_POSITION = 0,
802 SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
803 SVGA3D_DECLUSAGE_BLENDINDICES, // 2
804 SVGA3D_DECLUSAGE_NORMAL, // 3
805 SVGA3D_DECLUSAGE_PSIZE, // 4
806 SVGA3D_DECLUSAGE_TEXCOORD, // 5
807 SVGA3D_DECLUSAGE_TANGENT, // 6
808 SVGA3D_DECLUSAGE_BINORMAL, // 7
809 SVGA3D_DECLUSAGE_TESSFACTOR, // 8
810 SVGA3D_DECLUSAGE_POSITIONT, // 9
811 SVGA3D_DECLUSAGE_COLOR, // 10
812 SVGA3D_DECLUSAGE_FOG, // 11
813 SVGA3D_DECLUSAGE_DEPTH, // 12
814 SVGA3D_DECLUSAGE_SAMPLE, // 13
815 SVGA3D_DECLUSAGE_MAX
816} SVGA3dDeclUsage;
817
818typedef enum {
819 SVGA3D_DECLMETHOD_DEFAULT = 0,
820 SVGA3D_DECLMETHOD_PARTIALU,
821 SVGA3D_DECLMETHOD_PARTIALV,
822 SVGA3D_DECLMETHOD_CROSSUV, // Normal
823 SVGA3D_DECLMETHOD_UV,
824 SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
825 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
826} SVGA3dDeclMethod;
827
828typedef enum {
829 SVGA3D_DECLTYPE_FLOAT1 = 0,
830 SVGA3D_DECLTYPE_FLOAT2 = 1,
831 SVGA3D_DECLTYPE_FLOAT3 = 2,
832 SVGA3D_DECLTYPE_FLOAT4 = 3,
833 SVGA3D_DECLTYPE_D3DCOLOR = 4,
834 SVGA3D_DECLTYPE_UBYTE4 = 5,
835 SVGA3D_DECLTYPE_SHORT2 = 6,
836 SVGA3D_DECLTYPE_SHORT4 = 7,
837 SVGA3D_DECLTYPE_UBYTE4N = 8,
838 SVGA3D_DECLTYPE_SHORT2N = 9,
839 SVGA3D_DECLTYPE_SHORT4N = 10,
840 SVGA3D_DECLTYPE_USHORT2N = 11,
841 SVGA3D_DECLTYPE_USHORT4N = 12,
842 SVGA3D_DECLTYPE_UDEC3 = 13,
843 SVGA3D_DECLTYPE_DEC3N = 14,
844 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
845 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
846 SVGA3D_DECLTYPE_MAX,
847} SVGA3dDeclType;
848
849/*
850 * This structure is used for the divisor for geometry instancing;
851 * it's a direct translation of the Direct3D equivalent.
852 */
853typedef union {
854 struct {
855 /*
856 * For index data, this number represents the number of instances to draw.
857 * For instance data, this number represents the number of
 858 * instances per vertex in this stream
859 */
860 uint32 count : 30;
861
862 /*
863 * This is 1 if this is supposed to be the data that is repeated for
864 * every instance.
865 */
866 uint32 indexedData : 1;
867
868 /*
869 * This is 1 if this is supposed to be the per-instance data.
870 */
871 uint32 instanceData : 1;
872 };
873
874 uint32 value;
875} SVGA3dVertexDivisor;
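/*
 * Editor's sketch, not part of the original patch: filling in divisors
 * for classic two-stream instancing, where stream 0 carries geometry
 * that is repeated for every instance and stream 1 advances once per
 * instance. The helper name and the exact stream assignment are
 * assumptions for illustration.
 */
static inline void
svga3d_example_setup_instancing(SVGA3dVertexDivisor *geometry,
                                SVGA3dVertexDivisor *perInstance,
                                uint32 numInstances)
{
   geometry->value = 0;
   geometry->indexedData = 1;      /* repeated for every instance */
   geometry->count = numInstances; /* number of instances to draw */

   perInstance->value = 0;
   perInstance->instanceData = 1;  /* per-instance data */
   perInstance->count = 1;         /* one element per instance */
}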
876
877typedef enum {
878 SVGA3D_PRIMITIVE_INVALID = 0,
879 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
880 SVGA3D_PRIMITIVE_POINTLIST = 2,
881 SVGA3D_PRIMITIVE_LINELIST = 3,
882 SVGA3D_PRIMITIVE_LINESTRIP = 4,
883 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
884 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
885 SVGA3D_PRIMITIVE_MAX
886} SVGA3dPrimitiveType;
887
888typedef enum {
889 SVGA3D_COORDINATE_INVALID = 0,
890 SVGA3D_COORDINATE_LEFTHANDED = 1,
891 SVGA3D_COORDINATE_RIGHTHANDED = 2,
892 SVGA3D_COORDINATE_MAX
893} SVGA3dCoordinateType;
894
895typedef enum {
896 SVGA3D_TRANSFORM_INVALID = 0,
897 SVGA3D_TRANSFORM_WORLD = 1,
898 SVGA3D_TRANSFORM_VIEW = 2,
899 SVGA3D_TRANSFORM_PROJECTION = 3,
900 SVGA3D_TRANSFORM_TEXTURE0 = 4,
901 SVGA3D_TRANSFORM_TEXTURE1 = 5,
902 SVGA3D_TRANSFORM_TEXTURE2 = 6,
903 SVGA3D_TRANSFORM_TEXTURE3 = 7,
904 SVGA3D_TRANSFORM_TEXTURE4 = 8,
905 SVGA3D_TRANSFORM_TEXTURE5 = 9,
906 SVGA3D_TRANSFORM_TEXTURE6 = 10,
907 SVGA3D_TRANSFORM_TEXTURE7 = 11,
908 SVGA3D_TRANSFORM_WORLD1 = 12,
909 SVGA3D_TRANSFORM_WORLD2 = 13,
910 SVGA3D_TRANSFORM_WORLD3 = 14,
911 SVGA3D_TRANSFORM_MAX
912} SVGA3dTransformType;
913
914typedef enum {
915 SVGA3D_LIGHTTYPE_INVALID = 0,
916 SVGA3D_LIGHTTYPE_POINT = 1,
917 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
918 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
919 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
920 SVGA3D_LIGHTTYPE_MAX
921} SVGA3dLightType;
922
923typedef enum {
924 SVGA3D_CUBEFACE_POSX = 0,
925 SVGA3D_CUBEFACE_NEGX = 1,
926 SVGA3D_CUBEFACE_POSY = 2,
927 SVGA3D_CUBEFACE_NEGY = 3,
928 SVGA3D_CUBEFACE_POSZ = 4,
929 SVGA3D_CUBEFACE_NEGZ = 5,
930} SVGA3dCubeFace;
931
932typedef enum {
933 SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
934 SVGA3D_SHADERTYPE_VS = 1,
935 SVGA3D_SHADERTYPE_PS = 2,
936 SVGA3D_SHADERTYPE_MAX
937} SVGA3dShaderType;
938
939typedef enum {
940 SVGA3D_CONST_TYPE_FLOAT = 0,
941 SVGA3D_CONST_TYPE_INT = 1,
942 SVGA3D_CONST_TYPE_BOOL = 2,
943} SVGA3dShaderConstType;
944
945#define SVGA3D_MAX_SURFACE_FACES 6
946
947typedef enum {
948 SVGA3D_STRETCH_BLT_POINT = 0,
949 SVGA3D_STRETCH_BLT_LINEAR = 1,
950 SVGA3D_STRETCH_BLT_MAX
951} SVGA3dStretchBltMode;
952
953typedef enum {
954 SVGA3D_QUERYTYPE_OCCLUSION = 0,
955 SVGA3D_QUERYTYPE_MAX
956} SVGA3dQueryType;
957
958typedef enum {
959 SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
960 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
961 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
962 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
963} SVGA3dQueryState;
964
965typedef enum {
966 SVGA3D_WRITE_HOST_VRAM = 1,
967 SVGA3D_READ_HOST_VRAM = 2,
968} SVGA3dTransferType;
969
970/*
 971 * The maximum number of vertex arrays we're guaranteed to support in
 972 * SVGA_3D_CMD_DRAW_PRIMITIVES.
973 */
974#define SVGA3D_MAX_VERTEX_ARRAYS 32
975
976/*
977 * Identifiers for commands in the command FIFO.
978 *
979 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
980 * the SVGA3D protocol and remain reserved; they should not be used in the
981 * future.
982 *
983 * IDs between 1040 and 1999 (inclusive) are available for use by the
984 * current SVGA3D protocol.
985 *
986 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
987 * and up.
988 */
989
990#define SVGA_3D_CMD_LEGACY_BASE 1000
991#define SVGA_3D_CMD_BASE 1040
992
993#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
994#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
995#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
996#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
997#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
998#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
999#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
1000#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
1001#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
1002#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
1003#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
1004#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
1005#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
1006#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
1007#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
1008#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
1009#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
1010#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
1011#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
1012#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
1013#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
1014#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
1015#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
1016#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
1017#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
1018#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
1019#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
1020#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
1021#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
1022#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
1023#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
1024
1025#define SVGA_3D_CMD_FUTURE_MAX 2000
1026
1027/*
1028 * Common substructures used in multiple FIFO commands:
1029 */
1030
1031typedef struct {
1032 union {
1033 struct {
1034 uint16 function; // SVGA3dFogFunction
1035 uint8 type; // SVGA3dFogType
1036 uint8 base; // SVGA3dFogBase
1037 };
1038 uint32 uintValue;
1039 };
1040} SVGA3dFogMode;
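/*
 * Editor's sketch (hypothetical helper, not part of the original
 * patch): all three fog controls pack into the single uint32 that
 * SVGA3D_RS_FOGMODE takes as its value.
 */
static inline uint32
svga3d_example_pack_fog_mode(void)
{
   SVGA3dFogMode mode;
   mode.function = SVGA3D_FOGFUNC_LINEAR;     /* fog equation */
   mode.type     = SVGA3D_FOGTYPE_PIXEL;      /* per-pixel fog */
   mode.base     = SVGA3D_FOGBASE_DEPTHBASED; /* depth-based, not range-based */
   return mode.uintValue;
}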
1041
1042/*
1043 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1044 * is a surface ID as well as face/mipmap indices.
1045 */
1046
1047typedef
1048struct SVGA3dSurfaceImageId {
1049 uint32 sid;
1050 uint32 face;
1051 uint32 mipmap;
1052} SVGA3dSurfaceImageId;
1053
1054typedef
1055struct SVGA3dGuestImage {
1056 SVGAGuestPtr ptr;
1057
1058 /*
1059 * A note on interpretation of pitch: This value of pitch is the
1060 * number of bytes between vertically adjacent image
1061 * blocks. Normally this is the number of bytes between the first
1062 * pixel of two adjacent scanlines. With compressed textures,
1063 * however, this may represent the number of bytes between
1064 * compression blocks rather than between rows of pixels.
1065 *
1066 * XXX: Compressed textures currently must be tightly packed in guest memory.
1067 *
1068 * If the image is 1-dimensional, pitch is ignored.
1069 *
1070 * If 'pitch' is zero, the SVGA3D device calculates a pitch value
1071 * assuming each row of blocks is tightly packed.
1072 */
1073 uint32 pitch;
1074} SVGA3dGuestImage;
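/*
 * Editor's sketch, not from the original patch: for an uncompressed
 * format, a tightly packed image has pitch = width * bytesPerPixel
 * (e.g. 4 for SVGA3D_A8R8G8B8). Passing pitch == 0 instead asks the
 * device to compute the same value, as described above. The helper
 * name is hypothetical.
 */
static inline uint32
svga3d_example_packed_pitch(uint32 width, uint32 bytesPerPixel)
{
   return width * bytesPerPixel;
}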
1075
1076
1077/*
1078 * FIFO command format definitions:
1079 */
1080
1081/*
1082 * The data size header following cmdNum for every 3d command
1083 */
1084typedef
1085struct {
1086 uint32 id;
1087 uint32 size;
1088} SVGA3dCmdHeader;
1089
1090/*
1091 * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
1092 * optional mipmaps and cube faces.
1093 */
1094
1095typedef
1096struct {
1097 uint32 width;
1098 uint32 height;
1099 uint32 depth;
1100} SVGA3dSize;
1101
1102typedef enum {
1103 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
1104 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
1105 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
1106 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
1107 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
1108 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
1109 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
1110 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
1111 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
1112} SVGA3dSurfaceFlags;
1113
1114typedef
1115struct {
1116 uint32 numMipLevels;
1117} SVGA3dSurfaceFace;
1118
1119typedef
1120struct {
1121 uint32 sid;
1122 SVGA3dSurfaceFlags surfaceFlags;
1123 SVGA3dSurfaceFormat format;
1124 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1125 /*
1126 * Followed by an SVGA3dSize structure for each mip level in each face.
1127 *
1128 * A note on surface sizes: Sizes are always specified in pixels,
1129 * even if the true surface size is not a multiple of the minimum
1130 * block size of the surface's format. For example, a 3x3x1 DXT1
1131 * compressed texture would actually be stored as a 4x4x1 image in
1132 * memory.
1133 */
1134} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
1135
1136typedef
1137struct {
1138 uint32 sid;
1139} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
1140
1141typedef
1142struct {
1143 uint32 cid;
1144} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
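/*
 * Editor's sketch, not part of the original patch: the in-memory shape
 * of one complete FIFO packet for a fixed-size command. Note that
 * 'size' counts only the bytes that follow the header, not the header
 * itself. The typedef name is hypothetical.
 */
typedef
struct {
   SVGA3dCmdHeader header;      /* header.id   = SVGA_3D_CMD_CONTEXT_DEFINE */
   SVGA3dCmdDefineContext body; /* header.size = sizeof(body) */
} SVGA3dExampleDefineContextPacket;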
1145
1146typedef
1147struct {
1148 uint32 cid;
1149} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
1150
1151typedef
1152struct {
1153 uint32 cid;
1154 SVGA3dClearFlag clearFlag;
1155 uint32 color;
1156 float depth;
1157 uint32 stencil;
1158 /* Followed by variable number of SVGA3dRect structures */
1159} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
1160
1161typedef
1162struct SVGA3dCopyRect {
1163 uint32 x;
1164 uint32 y;
1165 uint32 w;
1166 uint32 h;
1167 uint32 srcx;
1168 uint32 srcy;
1169} SVGA3dCopyRect;
1170
1171typedef
1172struct SVGA3dCopyBox {
1173 uint32 x;
1174 uint32 y;
1175 uint32 z;
1176 uint32 w;
1177 uint32 h;
1178 uint32 d;
1179 uint32 srcx;
1180 uint32 srcy;
1181 uint32 srcz;
1182} SVGA3dCopyBox;
1183
1184typedef
1185struct {
1186 uint32 x;
1187 uint32 y;
1188 uint32 w;
1189 uint32 h;
1190} SVGA3dRect;
1191
1192typedef
1193struct {
1194 uint32 x;
1195 uint32 y;
1196 uint32 z;
1197 uint32 w;
1198 uint32 h;
1199 uint32 d;
1200} SVGA3dBox;
1201
1202typedef
1203struct {
1204 uint32 x;
1205 uint32 y;
1206 uint32 z;
1207} SVGA3dPoint;
1208
1209typedef
1210struct {
1211 SVGA3dLightType type;
1212 SVGA3dBool inWorldSpace;
1213 float diffuse[4];
1214 float specular[4];
1215 float ambient[4];
1216 float position[4];
1217 float direction[4];
1218 float range;
1219 float falloff;
1220 float attenuation0;
1221 float attenuation1;
1222 float attenuation2;
1223 float theta;
1224 float phi;
1225} SVGA3dLightData;
1226
1227typedef
1228struct {
1229 uint32 sid;
1230 /* Followed by variable number of SVGA3dCopyRect structures */
1231} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
1232
1233typedef
1234struct {
1235 SVGA3dRenderStateName state;
1236 union {
1237 uint32 uintValue;
1238 float floatValue;
1239 };
1240} SVGA3dRenderState;
1241
1242typedef
1243struct {
1244 uint32 cid;
1245 /* Followed by variable number of SVGA3dRenderState structures */
1246} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
1247
1248typedef
1249struct {
1250 uint32 cid;
1251 SVGA3dRenderTargetType type;
1252 SVGA3dSurfaceImageId target;
1253} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
1254
1255typedef
1256struct {
1257 SVGA3dSurfaceImageId src;
1258 SVGA3dSurfaceImageId dest;
1259 /* Followed by variable number of SVGA3dCopyBox structures */
1260} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
1261
1262typedef
1263struct {
1264 SVGA3dSurfaceImageId src;
1265 SVGA3dSurfaceImageId dest;
1266 SVGA3dBox boxSrc;
1267 SVGA3dBox boxDest;
1268 SVGA3dStretchBltMode mode;
1269} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
1270
1271typedef
1272struct {
1273 /*
1274 * If the discard flag is present in a surface DMA operation, the host may
1275 * discard the contents of the current mipmap level and face of the target
1276 * surface before applying the surface DMA contents.
1277 */
1278 uint32 discard : 1;
1279
1280 /*
1281 * If the unsynchronized flag is present, the host may perform this upload
1282 * without syncing to pending reads on this surface.
1283 */
1284 uint32 unsynchronized : 1;
1285
1286 /*
1287 * Guests *MUST* set the reserved bits to 0 before submitting the command
1288 * suffix as future flags may occupy these bits.
1289 */
1290 uint32 reserved : 30;
1291} SVGA3dSurfaceDMAFlags;
1292
1293typedef
1294struct {
1295 SVGA3dGuestImage guest;
1296 SVGA3dSurfaceImageId host;
1297 SVGA3dTransferType transfer;
1298 /*
1299 * Followed by variable number of SVGA3dCopyBox structures. For consistency
1300 * in all clipping logic and coordinate translation, we define the
1301 * "source" in each copyBox as the guest image and the
1302 * "destination" as the host image, regardless of transfer
1303 * direction.
1304 *
1305 * For efficiency, the SVGA3D device is free to copy more data than
1306 * specified. For example, it may round copy boxes outwards such
1307 * that they lie on particular alignment boundaries.
1308 */
1309} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
1310
1311/*
1312 * SVGA3dCmdSurfaceDMASuffix --
1313 *
1314 * This is a command suffix that will appear after a SurfaceDMA command in
1315 * the FIFO. It contains some extra information that hosts may use to
1316 * optimize performance or protect the guest. This suffix exists to preserve
1317 * backwards compatibility while also allowing for new functionality to be
1318 * implemented.
1319 */
1320
1321typedef
1322struct {
1323 uint32 suffixSize;
1324
1325 /*
1326 * The maximum offset is used to determine the maximum offset from the
1327 * guestPtr base address that will be accessed or written to during this
1328 * surfaceDMA. If the suffix is supported, the host will respect this
1329 * boundary while performing surface DMAs.
1330 *
1331 * Defaults to MAX_UINT32
1332 */
1333 uint32 maximumOffset;
1334
1335 /*
1336 * A set of flags that describes optimizations that the host may perform
 1337 * while performing this surface DMA operation. For correctness, the guest
 1338 * should never rely on behaviour that differs when these flags are set.
1339 *
1340 * Defaults to 0
1341 */
1342 SVGA3dSurfaceDMAFlags flags;
1343} SVGA3dCmdSurfaceDMASuffix;
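/*
 * Editor's sketch (hypothetical helper): conservative suffix defaults
 * matching the comments above -- no bounding offset and no optimization
 * flags, with the reserved bits explicitly zeroed as required.
 */
static inline void
svga3d_example_init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   suffix->suffixSize = sizeof(*suffix);
   suffix->maximumOffset = (uint32)-1;  /* default: MAX_UINT32 */
   suffix->flags.discard = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved = 0;          /* guests MUST zero these */
}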
1344
1345/*
1346 * SVGA_3D_CMD_DRAW_PRIMITIVES --
1347 *
1348 * This command is the SVGA3D device's generic drawing entry point.
1349 * It can draw multiple ranges of primitives, optionally using an
1350 * index buffer, using an arbitrary collection of vertex buffers.
1351 *
1352 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
1353 * during this draw call. The declarations specify which surface
1354 * the vertex data lives in, what that vertex data is used for,
1355 * and how to interpret it.
1356 *
1357 * Each SVGA3dPrimitiveRange defines a collection of primitives
1358 * to render using the same vertex arrays. An index buffer is
1359 * optional.
1360 */
1361
1362typedef
1363struct {
1364 /*
1365 * A range hint is an optional specification for the range of indices
1366 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
1367 * that the entire array will be used.
1368 *
1369 * These are only hints. The SVGA3D device may use them for
1370 * performance optimization if possible, but it's also allowed to
1371 * ignore these values.
1372 */
1373 uint32 first;
1374 uint32 last;
1375} SVGA3dArrayRangeHint;
1376
1377typedef
1378struct {
1379 /*
1380 * Define the origin and shape of a vertex or index array. Both
1381 * 'offset' and 'stride' are in bytes. The provided surface will be
1382 * reinterpreted as a flat array of bytes in the same format used
1383 * by surface DMA operations. To avoid unnecessary conversions, the
1384 * surface should be created with the SVGA3D_BUFFER format.
1385 *
1386 * Index 0 in the array starts 'offset' bytes into the surface.
1387 * Index 1 begins at byte 'offset + stride', etc. Array indices may
1388 * not be negative.
1389 */
1390 uint32 surfaceId;
1391 uint32 offset;
1392 uint32 stride;
1393} SVGA3dArray;
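/*
 * Editor's sketch, not from the original patch: describing the position
 * component of an interleaved { float pos[3]; float norm[3]; } vertex
 * buffer. The surface id parameter and helper name are assumptions for
 * illustration.
 */
static inline void
svga3d_example_describe_positions(SVGA3dArray *array, uint32 bufferSid)
{
   array->surfaceId = bufferSid;      /* ideally an SVGA3D_BUFFER surface */
   array->offset = 0;                 /* pos[] starts each vertex */
   array->stride = 6 * sizeof(float); /* pos[3] + norm[3] per vertex */
}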
1394
1395typedef
1396struct {
1397 /*
1398 * Describe a vertex array's data type, and define how it is to be
1399 * used by the fixed function pipeline or the vertex shader. It
1400 * isn't useful to have two VertexDecls with the same
1401 * VertexArrayIdentity in one draw call.
1402 */
1403 SVGA3dDeclType type;
1404 SVGA3dDeclMethod method;
1405 SVGA3dDeclUsage usage;
1406 uint32 usageIndex;
1407} SVGA3dVertexArrayIdentity;
1408
1409typedef
1410struct {
1411 SVGA3dVertexArrayIdentity identity;
1412 SVGA3dArray array;
1413 SVGA3dArrayRangeHint rangeHint;
1414} SVGA3dVertexDecl;
1415
1416typedef
1417struct {
1418 /*
1419 * Define a group of primitives to render, from sequential indices.
1420 *
 1421 * The values of 'primType' and 'primitiveCount' imply the
1422 * total number of vertices that will be rendered.
1423 */
1424 SVGA3dPrimitiveType primType;
1425 uint32 primitiveCount;
1426
1427 /*
1428 * Optional index buffer. If indexArray.surfaceId is
1429 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
1430 * without an index buffer is identical to rendering with an index
1431 * buffer containing the sequence [0, 1, 2, 3, ...].
1432 *
1433 * If an index buffer is in use, indexWidth specifies the width in
1434 * bytes of each index value. It must be less than or equal to
1435 * indexArray.stride.
1436 *
1437 * (Currently, the SVGA3D device requires index buffers to be tightly
1438 * packed. In other words, indexWidth == indexArray.stride)
1439 */
1440 SVGA3dArray indexArray;
1441 uint32 indexWidth;
1442
1443 /*
1444 * Optional index bias. This number is added to all indices from
1445 * indexArray before they are used as vertex array indices. This
1446 * can be used in multiple ways:
1447 *
1448 * - When not using an indexArray, this bias can be used to
1449 * specify where in the vertex arrays to begin rendering.
1450 *
1451 * - A positive number here is equivalent to increasing the
1452 * offset in each vertex array.
1453 *
1454 * - A negative number can be used to render using a small
1455 * vertex array and an index buffer that contains large
1456 * values. This may be used by some applications that
1457 * crop a vertex buffer without modifying their index
1458 * buffer.
1459 *
1460 * Note that rendering with a negative bias value may be slower and
1461 * use more memory than rendering with a positive or zero bias.
1462 */
1463 int32 indexBias;
1464} SVGA3dPrimitiveRange;
1465
1466typedef
1467struct {
1468 uint32 cid;
1469 uint32 numVertexDecls;
1470 uint32 numRanges;
1471
1472 /*
1473 * There are two variable size arrays after the
1474 * SVGA3dCmdDrawPrimitives structure. In order,
1475 * they are:
1476 *
1477 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
1478 * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
1479 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
 1480 * the frequency divisor for the corresponding vertex decl)
1481 */
 1482} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
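/*
 * Editor's sketch, not part of the original patch: layout of a complete
 * DRAW_PRIMITIVES packet with one vertex declaration, one primitive
 * range, and no divisor array. header.size would be set to
 * sizeof(packet) - sizeof(header). The typedef name is hypothetical.
 */
typedef
struct {
   SVGA3dCmdHeader header;       /* id = SVGA_3D_CMD_DRAW_PRIMITIVES  */
   SVGA3dCmdDrawPrimitives body; /* numVertexDecls = 1, numRanges = 1 */
   SVGA3dVertexDecl decl;        /* variable array 1 */
   SVGA3dPrimitiveRange range;   /* variable array 2 */
} SVGA3dExampleDrawPacket;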
1483
1484typedef
1485struct {
1486 uint32 stage;
1487 SVGA3dTextureStateName name;
1488 union {
1489 uint32 value;
1490 float floatValue;
1491 };
1492} SVGA3dTextureState;
1493
1494typedef
1495struct {
1496 uint32 cid;
1497 /* Followed by variable number of SVGA3dTextureState structures */
1498} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
1499
1500typedef
1501struct {
1502 uint32 cid;
1503 SVGA3dTransformType type;
1504 float matrix[16];
1505} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
1506
1507typedef
1508struct {
1509 float min;
1510 float max;
1511} SVGA3dZRange;
1512
1513typedef
1514struct {
1515 uint32 cid;
1516 SVGA3dZRange zRange;
1517} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
1518
1519typedef
1520struct {
1521 float diffuse[4];
1522 float ambient[4];
1523 float specular[4];
1524 float emissive[4];
1525 float shininess;
1526} SVGA3dMaterial;
1527
1528typedef
1529struct {
1530 uint32 cid;
1531 SVGA3dFace face;
1532 SVGA3dMaterial material;
1533} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
1534
1535typedef
1536struct {
1537 uint32 cid;
1538 uint32 index;
1539 SVGA3dLightData data;
1540} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
1541
1542typedef
1543struct {
1544 uint32 cid;
1545 uint32 index;
1546 uint32 enabled;
1547} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
1548
1549typedef
1550struct {
1551 uint32 cid;
1552 SVGA3dRect rect;
1553} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
1554
1555typedef
1556struct {
1557 uint32 cid;
1558 SVGA3dRect rect;
1559} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
1560
1561typedef
1562struct {
1563 uint32 cid;
1564 uint32 index;
1565 float plane[4];
1566} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
1567
1568typedef
1569struct {
1570 uint32 cid;
1571 uint32 shid;
1572 SVGA3dShaderType type;
 1573 /* Followed by variable number of DWORDs for shader bytecode */
1574} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
1575
1576typedef
1577struct {
1578 uint32 cid;
1579 uint32 shid;
1580 SVGA3dShaderType type;
1581} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
1582
1583typedef
1584struct {
1585 uint32 cid;
1586 uint32 reg; /* register number */
1587 SVGA3dShaderType type;
1588 SVGA3dShaderConstType ctype;
1589 uint32 values[4];
1590} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
1591
1592typedef
1593struct {
1594 uint32 cid;
1595 SVGA3dShaderType type;
1596 uint32 shid;
1597} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
1598
1599typedef
1600struct {
1601 uint32 cid;
1602 SVGA3dQueryType type;
1603} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1604
1605typedef
1606struct {
1607 uint32 cid;
1608 SVGA3dQueryType type;
1609 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1610} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1611
1612typedef
1613struct {
1614 uint32 cid; /* Same parameters passed to END_QUERY */
1615 SVGA3dQueryType type;
1616 SVGAGuestPtr guestResult;
1617} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1618
1619typedef
1620struct {
1621 uint32 totalSize; /* Set by guest before query is ended. */
1622 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1623 union { /* Set by host on exit from PENDING state */
1624 uint32 result32;
1625 };
1626} SVGA3dQueryResult;
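/*
 * Editor's sketch (hypothetical helper): guest-side initialization of a
 * query result buffer before SVGA_3D_CMD_END_QUERY is issued, following
 * the ownership rules in the comments above.
 */
static inline void
svga3d_example_prepare_query_result(SVGA3dQueryResult *result)
{
   result->totalSize = sizeof(*result);       /* set by guest */
   result->state = SVGA3D_QUERYSTATE_PENDING; /* host moves it to SUCCEEDED/FAILED */
}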
1627
1628/*
1629 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1630 *
1631 * This is a blit from an SVGA3D surface to a Screen Object. Just
1632 * like GMR-to-screen blits, this blit may be directed at a
1633 * specific screen or to the virtual coordinate space.
1634 *
1635 * The blit copies from a rectangular region of an SVGA3D surface
1636 * image to a rectangular region of a screen or screens.
1637 *
1638 * This command takes an optional variable-length list of clipping
1639 * rectangles after the body of the command. If no rectangles are
1640 * specified, there is no clipping region. The entire destRect is
1641 * drawn to. If one or more rectangles are included, they describe
1642 * a clipping region. The clip rectangle coordinates are measured
1643 * relative to the top-left corner of destRect.
1644 *
1645 * This clipping region serves multiple purposes:
1646 *
1647 * - It can be used to perform an irregularly shaped blit more
1648 * efficiently than by issuing many separate blit commands.
1649 *
1650 * - It is equivalent to allowing blits with non-integer
1651 * source coordinates. You could blit just one half-pixel
1652 * of a source, for example, by specifying a larger
1653 * destination rectangle than you need, then removing
1654 * part of it using a clip rectangle.
1655 *
1656 * Availability:
1657 * SVGA_FIFO_CAP_SCREEN_OBJECT
1658 *
1659 * Limitations:
1660 *
1661 * - Currently, no backend supports blits from a mipmap or face
1662 * other than the first one.
1663 */
1664
1665typedef
1666struct {
1667 SVGA3dSurfaceImageId srcImage;
1668 SVGASignedRect srcRect;
1669 uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
 1670 SVGASignedRect destRect; /* Supports scaling if src/dest different size */
1671 /* Clipping: zero or more SVGASignedRects follow */
1672} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
1673
1674
1675/*
1676 * Capability query index.
1677 *
1678 * Notes:
1679 *
1680 * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
1681 * fixed-function texture units available. Each of these units
1682 * work in both FFP and Shader modes, and they support texture
1683 * transforms and texture coordinates. The host may have additional
1684 * texture image units that are only usable with shaders.
1685 *
1686 * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
1687 * return TRUE. Even on physical hardware that does not support
1688 * these formats natively, the SVGA3D device will provide an emulation
1689 * which should be invisible to the guest OS.
1690 *
1691 * In general, the SVGA3D device should support any operation on
 1692 * any surface format; it just may perform some of these
1693 * operations in software depending on the capabilities of the
1694 * available physical hardware.
1695 *
1696 * XXX: In the future, we will add capabilities that describe in
1697 * detail what formats are supported in hardware for what kinds
1698 * of operations.
1699 */
1700
1701typedef enum {
1702 SVGA3D_DEVCAP_3D = 0,
1703 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
1704 SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
1705 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
1706 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
1707 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
1708 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
1709 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
1710 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
1711 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
1712 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
1713 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
1714 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
1715 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
1716 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
1717 SVGA3D_DEVCAP_QUERY_TYPES = 15,
1718 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
1719 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
1720 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
1721 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
1722 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
1723 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
1724 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
1725 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
1726 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
1727 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
1728 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
1729 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
1730 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
1731 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
1732 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
1733 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
1734 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
1735 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
1736 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
1737 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
1738 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
1739 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
1740 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
1741 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
1742 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
1743 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
1744 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
1745 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
1746 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
1747 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
1748 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
1749 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
1750 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
1751 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
1752 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
1753 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
1754 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
1755 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
1756 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
1757 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
1758 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
1759 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
1760 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
1761 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
1762 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
1763 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
1764 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
1765
1766 /*
1767 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
1768 * render targets. This does not include the depth or stencil targets.
1769 */
1770 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
1771
1772 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
1773 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
1774 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
1775 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
1776 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
1777
1778 /*
1779 * Don't add new caps into the previous section; the values in this
1780 * enumeration must not change. You can put new values right before
1781 * SVGA3D_DEVCAP_MAX.
1782 */
1783 SVGA3D_DEVCAP_MAX /* This must be the last index. */
1784} SVGA3dDevCapIndex;
1785
1786typedef union {
1787 Bool b;
1788 uint32 u;
1789 int32 i;
1790 float f;
1791} SVGA3dDevCapResult;
1792
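/*
 * Illustrative sketch (not part of the device headers): interpreting a
 * devcap result. The mechanism for fetching (index, result) pairs from
 * the host lives in the FIFO 3D caps block (see SVGA_FIFO_3D_CAPS in
 * svga_reg.h); svga3d_read_devcap() is a hypothetical accessor assumed
 * to fill in an SVGA3dDevCapResult for a given index.
 */

extern Bool svga3d_read_devcap(SVGA3dDevCapIndex idx,
                               SVGA3dDevCapResult *result); /* hypothetical */

static inline Bool
svga3d_supports_3d(void)
{
   SVGA3dDevCapResult result;

   if (!svga3d_read_devcap(SVGA3D_DEVCAP_3D, &result))
      return 0;

   /* SVGA3D_DEVCAP_3D is a boolean capability; others (e.g. the
    * MAX_* indices) come back through the u, i or f members. */
   return result.b;
}
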
1793#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 000000000000..7b85e9b8c854
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_escape.h --
28 *
29 * Definitions for our own (vendor-specific) SVGA Escape commands.
30 */
31
32#ifndef _SVGA_ESCAPE_H_
33#define _SVGA_ESCAPE_H_
34
35
36/*
37 * Namespace IDs for the escape command
38 */
39
40#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
41#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
42
43
44/*
45 * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
46 * the first DWORD of escape data (after the nsID and size). As a
47 * guideline we're using the high word and low word as a major and
48 * minor command number, respectively.
49 *
50 * Major command number allocation:
51 *
52 * 0000: Reserved
53 * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
54 * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
55 * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
56 */
57
58#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
59
60
61/*
62 * SVGA Hint commands.
63 *
64 * These escapes let the SVGA driver provide optional information to
65 * the host about the state of the guest or guest applications. The
66 * host can use these hints to make user interface or performance
67 * decisions.
68 *
69 * Notes:
70 *
71 * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
72 * that use the SVGA Screen Object extension. Instead of sending
73 * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
74 * Screen Object.
75 */
76
77#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
78#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
79
80typedef
81struct {
82 uint32 command;
83 uint32 fullscreen;
84 struct {
85 int32 x, y;
86 } monitorPosition;
87} SVGAEscapeHintFullscreen;
88
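/*
 * Illustrative sketch (not part of the device headers): filling in a
 * fullscreen hint (deprecated for Screen Object guests, as noted
 * above). The finished struct would travel as the body of an
 * SVGA_CMD_ESCAPE FIFO command (SVGAFifoCmdEscape in svga_reg.h) with
 * nsid == SVGA_ESCAPE_NSID_VMWARE; the FIFO emission itself is left
 * out here.
 */
static inline void
svga_build_fullscreen_hint(SVGAEscapeHintFullscreen *hint,
                           uint32 fullscreen, int32 x, int32 y)
{
   hint->command = SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN;
   hint->fullscreen = fullscreen;    /* nonzero: app is fullscreen */
   hint->monitorPosition.x = x;
   hint->monitorPosition.y = y;
}
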
89#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 000000000000..f753d73c14b4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_overlay.h --
28 *
29 * Definitions for video-overlay support.
30 */
31
32#ifndef _SVGA_OVERLAY_H_
33#define _SVGA_OVERLAY_H_
34
35#include "svga_reg.h"
36
37/*
38 * Video formats we support
39 */
40
41#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
42#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
43#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
44
45typedef enum {
46 SVGA_OVERLAY_FORMAT_INVALID = 0,
47 SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
48 SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
49 SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
50} SVGAOverlayFormat;
51
52#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
53
54#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
55
56#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
57 /* FIFO escape layout:
58 * Type, Stream Id, (Register Id, Value) pairs */
59
60#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
61 /* FIFO escape layout:
62 * Type, Stream Id */
63
64typedef
65struct SVGAEscapeVideoSetRegs {
66 struct {
67 uint32 cmdType;
68 uint32 streamId;
69 } header;
70
71 // May include zero or more items.
72 struct {
73 uint32 registerId;
74 uint32 value;
75 } items[1];
76} SVGAEscapeVideoSetRegs;
77
78typedef
79struct SVGAEscapeVideoFlush {
80 uint32 cmdType;
81 uint32 streamId;
82} SVGAEscapeVideoFlush;
83
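/*
 * Illustrative sketch (not part of the device headers): building a
 * single-register SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS payload. Like the
 * other escapes, it is carried in an SVGA_CMD_ESCAPE command with
 * nsid == SVGA_ESCAPE_NSID_VMWARE; multi-register variants simply
 * append more (registerId, value) items and grow the escape size.
 */
static inline void
svga_video_set_one_reg(SVGAEscapeVideoSetRegs *cmd,
                       uint32 streamId, uint32 registerId, uint32 value)
{
   cmd->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
   cmd->header.streamId = streamId;
   cmd->items[0].registerId = registerId;
   cmd->items[0].value = value;
}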
84
85/*
86 * Struct definitions for the video overlay commands built on
87 * SVGAFifoCmdEscape.
88 */
89typedef
90struct {
91 uint32 command;
92 uint32 overlay;
93} SVGAFifoEscapeCmdVideoBase;
94
95typedef
96struct {
97 SVGAFifoEscapeCmdVideoBase videoCmd;
98} SVGAFifoEscapeCmdVideoFlush;
99
100typedef
101struct {
102 SVGAFifoEscapeCmdVideoBase videoCmd;
103 struct {
104 uint32 regId;
105 uint32 value;
106 } items[1];
107} SVGAFifoEscapeCmdVideoSetRegs;
108
109typedef
110struct {
111 SVGAFifoEscapeCmdVideoBase videoCmd;
112 struct {
113 uint32 regId;
114 uint32 value;
115 } items[SVGA_VIDEO_NUM_REGS];
116} SVGAFifoEscapeCmdVideoSetAllRegs;
117
118
119/*
120 *----------------------------------------------------------------------
121 *
122 * VMwareVideoGetAttributes --
123 *
124 * Computes the size, pitches and offsets for YUV frames.
125 *
126 * Results:
127 * TRUE on success; FALSE on failure.
128 *
129 * Side effects:
130 * Pitches and offsets for the given YUV frame are put in 'pitches'
131 * and 'offsets' respectively. They are both optional though.
132 *
133 *----------------------------------------------------------------------
134 */
135
136static inline bool
137VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
138 uint32 *width, // IN / OUT
139 uint32 *height, // IN / OUT
140 uint32 *size, // OUT
141 uint32 *pitches, // OUT (optional)
142 uint32 *offsets) // OUT (optional)
143{
144 int tmp;
145
146 *width = (*width + 1) & ~1;
147
148 if (offsets) {
149 offsets[0] = 0;
150 }
151
152 switch (format) {
153 case VMWARE_FOURCC_YV12:
154 *height = (*height + 1) & ~1;
155 *size = (*width + 3) & ~3;
156
157 if (pitches) {
158 pitches[0] = *size;
159 }
160
161 *size *= *height;
162
163 if (offsets) {
164 offsets[1] = *size;
165 }
166
167 tmp = ((*width >> 1) + 3) & ~3;
168
169 if (pitches) {
170 pitches[1] = pitches[2] = tmp;
171 }
172
173 tmp *= (*height >> 1);
174 *size += tmp;
175
176 if (offsets) {
177 offsets[2] = *size;
178 }
179
180 *size += tmp;
181 break;
182
183 case VMWARE_FOURCC_YUY2:
184 case VMWARE_FOURCC_UYVY:
185 *size = *width * 2;
186
187 if (pitches) {
188 pitches[0] = *size;
189 }
190
191 *size *= *height;
192 break;
193
194 default:
195 return false;
196 }
197
198 return true;
199}
200
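/*
 * Illustrative usage (not part of the device headers): the layout of a
 * 320x240 YV12 frame as computed by VMwareVideoGetAttributes(). YV12
 * is a full-resolution Y plane followed by quarter-resolution V and U
 * planes, which is why three pitches and offsets come back.
 */
static inline uint32
svga_yv12_320x240_size(void)
{
   uint32 width = 320, height = 240, size;
   uint32 pitches[3], offsets[3];

   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                 &width, &height, &size,
                                 pitches, offsets))
      return 0;

   /* Here pitches = {320, 160, 160}, offsets = {0, 76800, 96000}
    * and size == 115200 (320*240 luma + 2 * 160*120 chroma). */
   return size;
}
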
201#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 000000000000..1b96c2ec07dd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_reg.h --
28 *
29 * Virtual hardware definitions for the VMware SVGA II device.
30 */
31
32#ifndef _SVGA_REG_H_
33#define _SVGA_REG_H_
34
35/*
36 * PCI device IDs.
37 */
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
40
41/*
42 * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
43 * cursor bypass mode. This is still supported, but no new guest
44 * drivers should use it.
45 */
46#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
47#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
48#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
49#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
50
51/*
52 * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
53 * The changeMap in the monitor is proportional to this number. Therefore, we'd
54 * like to keep it as small as possible to reduce monitor overhead (using
55 * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
56 * 4k!).
57 *
58 * NB: For compatibility reasons, this value must be greater than 0xff0000.
59 * See bug 335072.
60 */
61#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
62
63#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
64#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
65#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
66
67#define SVGA_MAGIC 0x900000UL
68#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
69
70/* Version 2 let the address of the frame buffer be unsigned on Win32 */
71#define SVGA_VERSION_2 2
72#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
73
74/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
75 PALETTE_BASE has moved */
76#define SVGA_VERSION_1 1
77#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
78
79/* Version 0 is the initial version */
80#define SVGA_VERSION_0 0
81#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
82
83/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
84#define SVGA_ID_INVALID 0xFFFFFFFF
85
86/* Port offsets, relative to BAR0 */
87#define SVGA_INDEX_PORT 0x0
88#define SVGA_VALUE_PORT 0x1
89#define SVGA_BIOS_PORT 0x2
90#define SVGA_IRQSTATUS_PORT 0x8
91
92/*
93 * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
94 *
95 * Interrupts are only supported when the
96 * SVGA_CAP_IRQMASK capability is present.
97 */
98#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
99#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
100#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
101
102/*
103 * Registers
104 */
105
106enum {
107 SVGA_REG_ID = 0,
108 SVGA_REG_ENABLE = 1,
109 SVGA_REG_WIDTH = 2,
110 SVGA_REG_HEIGHT = 3,
111 SVGA_REG_MAX_WIDTH = 4,
112 SVGA_REG_MAX_HEIGHT = 5,
113 SVGA_REG_DEPTH = 6,
114 SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
115 SVGA_REG_PSEUDOCOLOR = 8,
116 SVGA_REG_RED_MASK = 9,
117 SVGA_REG_GREEN_MASK = 10,
118 SVGA_REG_BLUE_MASK = 11,
119 SVGA_REG_BYTES_PER_LINE = 12,
120 SVGA_REG_FB_START = 13, /* (Deprecated) */
121 SVGA_REG_FB_OFFSET = 14,
122 SVGA_REG_VRAM_SIZE = 15,
123 SVGA_REG_FB_SIZE = 16,
124
125 /* ID 0 implementation only had the above registers, then the palette */
126
127 SVGA_REG_CAPABILITIES = 17,
128 SVGA_REG_MEM_START = 18, /* (Deprecated) */
129 SVGA_REG_MEM_SIZE = 19,
130 SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
131 SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
132 SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
133 SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
134 SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
135 SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
136 SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
137 SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
138 SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
139 SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
140 SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
141 SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
142 SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
143 SVGA_REG_IRQMASK = 33, /* Interrupt mask */
144
145 /* Legacy multi-monitor support */
146 SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
147 SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
148 SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
149 SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
150 SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
151 SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
152 SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
153
154 /* See "Guest memory regions" below. */
155 SVGA_REG_GMR_ID = 41,
156 SVGA_REG_GMR_DESCRIPTOR = 42,
157 SVGA_REG_GMR_MAX_IDS = 43,
158 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
159
160 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
161 SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
162
163 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
164 /* Next 768 (== 256*3) registers exist for colormap */
165
166 SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
167 /* Base of scratch registers */
168 /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
169 First 4 are reserved for VESA BIOS Extension; any remaining are for
170 the use of the current SVGA driver. */
171};
172
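/*
 * Illustrative sketch (not part of the device headers): register access
 * and version negotiation. A register is reached by writing its index
 * to SVGA_INDEX_PORT and then accessing SVGA_VALUE_PORT; svga_outl()
 * and svga_inl() are hypothetical stand-ins for the platform's port
 * I/O primitives, and ioBase is the device's BAR0 base.
 */

extern void svga_outl(uint32 value, uint32 port);   /* hypothetical */
extern uint32 svga_inl(uint32 port);                /* hypothetical */

static inline uint32
svga_read_reg(uint32 ioBase, uint32 index)
{
   svga_outl(index, ioBase + SVGA_INDEX_PORT);
   return svga_inl(ioBase + SVGA_VALUE_PORT);
}

static inline void
svga_write_reg(uint32 ioBase, uint32 index, uint32 value)
{
   svga_outl(index, ioBase + SVGA_INDEX_PORT);
   svga_outl(value, ioBase + SVGA_VALUE_PORT);
}

/*
 * The driver proposes the newest ID it understands and falls back one
 * version at a time until the device echoes the proposal back.
 */
static inline uint32
svga_negotiate_version(uint32 ioBase)
{
   uint32 id;

   for (id = SVGA_ID_2; id >= SVGA_ID_0; id--) {
      svga_write_reg(ioBase, SVGA_REG_ID, id);
      if (svga_read_reg(ioBase, SVGA_REG_ID) == id)
         return id;         /* device accepted this version */
   }
   return SVGA_ID_INVALID;  /* no mutually supported version */
}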
173
174/*
175 * Guest memory regions (GMRs):
176 *
177 * This is a new memory mapping feature available in SVGA devices
178 * which have the SVGA_CAP_GMR bit set. Previously, there were two
179 * fixed memory regions available with which to share data between the
180 * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
181 * are our name for an extensible way of providing arbitrary DMA
182 * buffers for use between the driver and the SVGA device. They are a
183 * new alternative to framebuffer memory, usable for both 2D and 3D
184 * graphics operations.
185 *
186 * Since GMR mapping must be done synchronously with guest CPU
187 * execution, we use a new pair of SVGA registers:
188 *
189 * SVGA_REG_GMR_ID --
190 *
191 * Read/write.
192 * This register holds the 32-bit ID (a small positive integer)
193 * of a GMR to create, delete, or redefine. Writing this register
194 * has no side-effects.
195 *
196 * SVGA_REG_GMR_DESCRIPTOR --
197 *
198 * Write-only.
199 * Writing this register will create, delete, or redefine the GMR
200 * specified by the above ID register. If this register is zero,
201 * the GMR is deleted. Any pointers into this GMR (including those
202 * currently being processed by FIFO commands) will be
203 * synchronously invalidated.
204 *
205 * If this register is nonzero, it must be the physical page
206 * number (PPN) of a data structure which describes the physical
207 * layout of the memory region this GMR should describe. The
208 * descriptor structure will be read synchronously by the SVGA
209 * device when this register is written. The descriptor need not
210 * remain allocated for the lifetime of the GMR.
211 *
212 * The guest driver should write SVGA_REG_GMR_ID first, then
213 * SVGA_REG_GMR_DESCRIPTOR.
214 *
215 * SVGA_REG_GMR_MAX_IDS --
216 *
217 * Read-only.
218 * The SVGA device may choose to support a maximum number of
219 * user-defined GMR IDs. This register holds the number of supported
220 * IDs. (The maximum supported ID plus 1)
221 *
222 * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
223 *
224 * Read-only.
225 * The SVGA device may choose to put a limit on the total number
226 * of SVGAGuestMemDescriptor structures it will read when defining
227 * a single GMR.
228 *
229 * The descriptor structure is an array of SVGAGuestMemDescriptor
230 * structures. Each structure may do one of three things:
231 *
232 * - Terminate the GMR descriptor list.
233 * (ppn==0, numPages==0)
234 *
235 * - Add a PPN or range of PPNs to the GMR's virtual address space.
236 * (ppn != 0, numPages != 0)
237 *
238 * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
239 * support multi-page GMR descriptor tables without forcing the
240 * driver to allocate physically contiguous memory.
241 * (ppn != 0, numPages == 0)
242 *
243 * Note that each physical page of SVGAGuestMemDescriptor structures
244 * can describe at least 2MB of guest memory. If the driver needs to
245 * use more than one page of descriptor structures, it must use one of
246 * its SVGAGuestMemDescriptors to point to an additional page. The
247 * device will never automatically cross a page boundary.
248 *
249 * Once the driver has described a GMR, it is immediately available
250 * for use via any FIFO command that uses an SVGAGuestPtr structure.
251 * These pointers include a GMR identifier plus an offset into that
252 * GMR.
253 *
254 * The driver must check the SVGA_CAP_GMR bit before using the GMR
255 * registers.
256 */
257
258/*
259 * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
260 * memory as well. In the future, these IDs could even be used to
261 * allow legacy memory regions to be redefined by the guest as GMRs.
262 *
263 * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
264 * is being phased out. Please try to use user-defined GMRs whenever
265 * possible.
266 */
267#define SVGA_GMR_NULL ((uint32) -1)
268#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
269
270typedef
271struct SVGAGuestMemDescriptor {
272 uint32 ppn;
273 uint32 numPages;
274} SVGAGuestMemDescriptor;
275
276typedef
277struct SVGAGuestPtr {
278 uint32 gmrId;
279 uint32 offset;
280} SVGAGuestPtr;
281
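/*
 * Illustrative sketch (not part of the device headers): defining a GMR
 * whose descriptor list fits in one physical page, following the
 * protocol described above. descPage is the mapping of that page and
 * descPpn its physical page number; svga_write_reg() is the sketch
 * helper from earlier in this file. The caller must have checked
 * SVGA_CAP_GMR and must leave room for the terminating entry.
 */
static inline void
svga_define_gmr(uint32 ioBase, uint32 gmrId,
                SVGAGuestMemDescriptor *descPage, uint32 descPpn,
                const SVGAGuestMemDescriptor *ranges, uint32 numRanges)
{
   uint32 i;

   /* Copy the PPN ranges, then terminate with (ppn==0, numPages==0). */
   for (i = 0; i < numRanges; i++)
      descPage[i] = ranges[i];
   descPage[i].ppn = 0;
   descPage[i].numPages = 0;

   /* ID first, then descriptor; the device reads the descriptor page
    * synchronously, so it may be reused as soon as this returns. */
   svga_write_reg(ioBase, SVGA_REG_GMR_ID, gmrId);
   svga_write_reg(ioBase, SVGA_REG_GMR_DESCRIPTOR, descPpn);
}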
282
283/*
284 * SVGAGMRImageFormat --
285 *
286 * This is a packed representation of the source 2D image format
287 * for a GMR-to-screen blit. Currently it is defined as an encoding
288 * of the screen's color depth and bits-per-pixel, however, 16 bits
289 * are reserved for future use to identify other encodings (such as
290 * RGBA or higher-precision images).
291 *
292 * Currently supported formats:
293 *
294 * bpp depth Format Name
295 * --- ----- -----------
296 * 32 24 32-bit BGRX
297 * 24 24 24-bit BGR
298 * 16 16 RGB 5-6-5
299 * 16 15 RGB 5-5-5
300 *
301 */
302
303typedef
304struct SVGAGMRImageFormat {
305 union {
306 struct {
307 uint32 bitsPerPixel : 8;
308 uint32 colorDepth : 8;
309 uint32 reserved : 16; // Must be zero
310 };
311
312 uint32 value;
313 };
314} SVGAGMRImageFormat;
315
316/*
317 * SVGAColorBGRX --
318 *
319 * A 24-bit color format (BGRX), which does not depend on the
320 * format of the legacy guest framebuffer (GFB) or the current
321 * GMRFB state.
322 */
323
324typedef
325struct SVGAColorBGRX {
326 union {
327 struct {
328 uint32 b : 8;
329 uint32 g : 8;
330 uint32 r : 8;
331 uint32 x : 8; // Unused
332 };
333
334 uint32 value;
335 };
336} SVGAColorBGRX;
337
338
339/*
340 * SVGASignedRect --
341 * SVGASignedPoint --
342 *
343 * Signed rectangle and point primitives. These are used by the new
344 * 2D primitives for drawing to Screen Objects, which can occupy a
345 * signed virtual coordinate space.
346 *
347 * SVGASignedRect specifies a half-open interval: the (left, top)
348 * pixel is part of the rectangle, but the (right, bottom) pixel is
349 * not.
350 */
351
352typedef
353struct SVGASignedRect {
354 int32 left;
355 int32 top;
356 int32 right;
357 int32 bottom;
358} SVGASignedRect;
359
360typedef
361struct SVGASignedPoint {
362 int32 x;
363 int32 y;
364} SVGASignedPoint;
365
366
367/*
368 * Capabilities
369 *
370 * Note the holes in the bitfield. Missing bits have been deprecated,
371 * and must not be reused. Those capabilities will never be reported
372 * by new versions of the SVGA device.
373 */
374
375#define SVGA_CAP_NONE 0x00000000
376#define SVGA_CAP_RECT_COPY 0x00000002
377#define SVGA_CAP_CURSOR 0x00000020
378#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
379#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
380#define SVGA_CAP_8BIT_EMULATION 0x00000100
381#define SVGA_CAP_ALPHA_CURSOR 0x00000200
382#define SVGA_CAP_3D 0x00004000
383#define SVGA_CAP_EXTENDED_FIFO 0x00008000
384#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
385#define SVGA_CAP_PITCHLOCK 0x00020000
386#define SVGA_CAP_IRQMASK 0x00040000
387#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
388#define SVGA_CAP_GMR 0x00100000
389#define SVGA_CAP_TRACES 0x00200000
390
391
392/*
393 * FIFO register indices.
394 *
395 * The FIFO is a chunk of device memory mapped into guest physmem. It
396 * is always treated as 32-bit words.
397 *
398 * The guest driver gets to decide how to partition it between
399 * - FIFO registers (there are always at least 4, specifying where the
400 * following data area is and how much data it contains; there may be
401 * more registers following these, depending on the FIFO protocol
402 * version in use)
403 * - FIFO data, written by the guest and slurped out by the VMX.
404 * These indices are 32-bit word offsets into the FIFO.
405 */
406
407enum {
408 /*
409 * Block 1 (basic registers): The originally defined FIFO registers.
410 * These exist and are valid for all versions of the FIFO protocol.
411 */
412
413 SVGA_FIFO_MIN = 0,
414 SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
415 SVGA_FIFO_NEXT_CMD,
416 SVGA_FIFO_STOP,
417
418 /*
419 * Block 2 (extended registers): Mandatory registers for the extended
420 * FIFO. These exist if the SVGA caps register includes
421 * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
422 * associated capability bit is enabled.
423 *
424 * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
425 * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
426 * This means that the guest has to test individually (in most cases
427 * using FIFO caps) for the presence of registers after this; the VMX
428 * can define "extended FIFO" to mean whatever it wants, and currently
429 * won't enable it unless there's room for that set and much more.
430 */
431
432 SVGA_FIFO_CAPABILITIES = 4,
433 SVGA_FIFO_FLAGS,
434 // Valid with SVGA_FIFO_CAP_FENCE:
435 SVGA_FIFO_FENCE,
436
437 /*
438 * Block 3a (optional extended registers): Additional registers for the
439 * extended FIFO, whose presence isn't actually implied by
440 * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
441 * leave room for them.
442 *
443 * The registers in block 3a are ones the VMX currently considers
444 * mandatory for the extended FIFO.
445 */
446
447 // Valid if exists (i.e. if extended FIFO enabled):
448 SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
449 // Valid with SVGA_FIFO_CAP_PITCHLOCK:
450 SVGA_FIFO_PITCHLOCK,
451
452 // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
453 SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
454 SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
455 SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
456 SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
457 SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
458
459 // Valid with SVGA_FIFO_CAP_RESERVE:
460 SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
461
462 /*
463 * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
464 *
465 * By default this is SVGA_ID_INVALID, to indicate that the cursor
466 * coordinates are specified relative to the virtual root. If this
467 * is set to a specific screen ID, cursor position is reinterpreted
468 * as a signed offset relative to that screen's origin. This is the
469 * only way to place the cursor on a non-rooted screen.
470 */
471 SVGA_FIFO_CURSOR_SCREEN_ID,
472
473 /*
474 * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
475 * registers, but this must be done carefully and with judicious use of
476 * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
477 * enough to tell you whether the register exists: we've shipped drivers
478 * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
479 * the earlier ones. The actual order of introduction was:
480 * - PITCHLOCK
481 * - 3D_CAPS
482 * - CURSOR_* (cursor bypass 3)
483 * - RESERVED
484 * So, code that wants to know whether it can use any of the
485 * aforementioned registers, or anything else added after PITCHLOCK and
486 * before 3D_CAPS, needs to reason about something other than
487 * SVGA_FIFO_MIN.
488 */
489
490 /*
491 * 3D caps block space; valid with 3D hardware version >=
492 * SVGA3D_HWVERSION_WS6_B1.
493 */
494 SVGA_FIFO_3D_CAPS = 32,
495 SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
496
497 /*
498 * End of VMX's current definition of "extended-FIFO registers".
499 * Registers before here are always enabled/disabled as a block; either
500 * the extended FIFO is enabled and includes all preceding registers, or
501 * it's disabled entirely.
502 *
503 * Block 3b (truly optional extended registers): Additional registers for
504 * the extended FIFO, which the VMX already knows how to enable and
505 * disable with correct granularity.
506 *
507 * Registers after here exist if and only if the guest SVGA driver
508 * sets SVGA_FIFO_MIN high enough to leave room for them.
509 */
510
511 // Valid if register exists:
512 SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
513 SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
514 SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
515
516 /*
517 * Always keep this last. This defines the maximum number of
518 * registers we know about. At power-on, this value is placed in
519 * the SVGA_REG_MEM_REGS register, and we expect the guest driver
520 * to allocate this much space in FIFO memory for registers.
521 */
522 SVGA_FIFO_NUM_REGS
523};
524
525
526/*
527 * Definition of registers included in extended FIFO support.
528 *
529 * The guest SVGA driver gets to allocate the FIFO between registers
530 * and data. It must always allocate at least 4 registers, but old
531 * drivers stopped there.
532 *
533 * The VMX will enable extended FIFO support if and only if the guest
534 * left enough room for all registers defined as part of the mandatory
535 * set for the extended FIFO.
536 *
537 * Note that the guest drivers typically allocate the FIFO only at
538 * initialization time, not at mode switches, so it's likely that the
539 * number of FIFO registers won't change without a reboot.
540 *
541 * All registers less than this value are guaranteed to be present if
542 * svgaUser->fifo.extended is set. Any later registers must be tested
543 * individually for compatibility at each use (in the VMX).
544 *
545 * This value is used only by the VMX, so it can change without
546 * affecting driver compatibility; keep it that way?
547 */
548#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
549
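/*
 * Illustrative sketch (not part of the device headers): presence tests
 * for FIFO registers and capabilities. fifoMem is the mapped FIFO as
 * an array of 32-bit words. Note the caveat above: for registers in
 * the gap before SVGA_FIFO_3D_CAPS, a large enough SVGA_FIFO_MIN is
 * not proof of existence and the capability bits must be used instead.
 */
static inline bool
svga_fifo_has_reg(const volatile uint32 *fifoMem, uint32 reg)
{
   /* SVGA_FIFO_MIN holds a byte offset; register indices are words. */
   return fifoMem[SVGA_FIFO_MIN] > (reg << 2);
}

static inline bool
svga_fifo_has_cap(const volatile uint32 *fifoMem, uint32 cap)
{
   return (fifoMem[SVGA_FIFO_CAPABILITIES] & cap) != 0;
}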
550
551/*
552 * FIFO Synchronization Registers
553 *
554 * This explains the relationship between the various FIFO
555 * sync-related registers in IOSpace and in FIFO space.
556 *
557 * SVGA_REG_SYNC --
558 *
559 * The SYNC register can be used in two different ways by the guest:
560 *
561 * 1. If the guest wishes to fully sync (drain) the FIFO,
562 * it will write once to SYNC then poll on the BUSY
563 * register. The FIFO is sync'ed once BUSY is zero.
564 *
565 * 2. If the guest wants to asynchronously wake up the host,
566 * it will write once to SYNC without polling on BUSY.
567 * Ideally it will do this after some new commands have
568 * been placed in the FIFO, and after reading a zero
569 * from SVGA_FIFO_BUSY.
570 *
571 * (1) is the original behaviour that SYNC was designed to
572 * support. Originally, a write to SYNC would implicitly
573 * trigger a read from BUSY. This causes us to synchronously
574 * process the FIFO.
575 *
576 * This behaviour has since been changed so that writing SYNC
577 * will *not* implicitly cause a read from BUSY. Instead, it
578 * makes a channel call which asynchronously wakes up the MKS
579 * thread.
580 *
581 * New guests can use this new behaviour to implement (2)
582 * efficiently. This lets guests get the host's attention
583 * without waiting for the MKS to poll, which gives us much
584 * better CPU utilization on SMP hosts and on UP hosts while
585 * we're blocked on the host GPU.
586 *
587 * Old guests shouldn't notice the behaviour change. SYNC was
588 * never guaranteed to process the entire FIFO, since it was
589 * bounded to a particular number of CPU cycles. Old guests will
590 * still loop on the BUSY register until the FIFO is empty.
591 *
592 * Writing to SYNC currently has the following side-effects:
593 *
594 * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
595 * - Asynchronously wakes up the MKS thread for FIFO processing
596 * - The value written to SYNC is recorded as a "reason", for
597 * stats purposes.
598 *
599 * If SVGA_FIFO_BUSY is available, drivers are advised to only
600 * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
601 * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
602 * eventually set SVGA_FIFO_BUSY on its own, but this approach
603 * lets the driver avoid sending multiple asynchronous wakeup
604 * messages to the MKS thread.
605 *
606 * SVGA_REG_BUSY --
607 *
608 * This register is set to TRUE when SVGA_REG_SYNC is written,
609 * and it reads as FALSE when the FIFO has been completely
610 * drained.
611 *
612 * Every read from this register causes us to synchronously
613 * process FIFO commands. There is no guarantee as to how many
614 * commands each read will process.
615 *
616 * CPU time spent processing FIFO commands will be billed to
617 * the guest.
618 *
619 * New drivers should avoid using this register unless they
620 * need to guarantee that the FIFO is completely drained. It
621 * is overkill for performing a sync-to-fence. Older drivers
622 * will use this register for any type of synchronization.
623 *
624 * SVGA_FIFO_BUSY --
625 *
626 * This register is a fast way for the guest driver to check
627 * whether the FIFO is already being processed. It reads and
628 * writes at normal RAM speeds, with no monitor intervention.
629 *
630 * If this register reads as TRUE, the host is guaranteeing that
631 * any new commands written into the FIFO will be noticed before
632 * the MKS goes back to sleep.
633 *
634 * If this register reads as FALSE, no such guarantee can be
635 * made.
636 *
637 * The guest should use this register to quickly determine
638 * whether or not it needs to wake up the host. If the guest
639 * just wrote a command or group of commands that it would like
640 * the host to begin processing, it should:
641 *
642 * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
643 * action is necessary.
644 *
645 * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
646 * code that we've already sent a SYNC to the host and we
647 * don't need to send a duplicate.
648 *
649 * 3. Write a reason to SVGA_REG_SYNC. This will send an
650 * asynchronous wakeup to the MKS thread.
651 */
652
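/*
 * Illustrative sketch (not part of the device headers): the three-step
 * doorbell sequence described above, using the sketch register helpers
 * from earlier in this file. Assumes the driver already confirmed that
 * SVGA_FIFO_BUSY exists; 'reason' is an arbitrary value the host
 * records for stats.
 */
static inline void
svga_fifo_kick(uint32 ioBase, volatile uint32 *fifoMem, uint32 reason)
{
   /* 1. Host already awake and processing? Then nothing to do. */
   if (fifoMem[SVGA_FIFO_BUSY])
      return;

   /* 2. Record that a wakeup is in flight, so later commands don't
    *    send duplicate SYNCs... */
   fifoMem[SVGA_FIFO_BUSY] = 1;

   /* 3. ...and asynchronously wake the host. */
   svga_write_reg(ioBase, SVGA_REG_SYNC, reason);
}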
653
654/*
655 * FIFO Capabilities
656 *
657 * Fence -- Fence register and command are supported
658 * Accel Front -- Front buffer only commands are supported
659 * Pitch Lock -- Pitch lock register is supported
660 * Video -- SVGA Video overlay units are supported
661 * Escape -- Escape command is supported
662 *
663 * XXX: Add longer descriptions for each capability, including a list
664 * of the new features that each capability provides.
665 *
666 * SVGA_FIFO_CAP_SCREEN_OBJECT --
667 *
668 * Provides dynamic multi-screen rendering, for improved Unity and
669 * multi-monitor modes. With Screen Object, the guest can
670 * dynamically create and destroy 'screens', which can represent
671 * Unity windows or virtual monitors. Screen Object also provides
672 * strong guarantees that DMA operations happen only when
673 * guest-initiated. Screen Object deprecates the BAR1 guest
674 * framebuffer (GFB) and all commands that work only with the GFB.
675 *
676 * New registers:
677 * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
678 *
679 * New 2D commands:
680 * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
681 * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
682 *
683 * New 3D commands:
684 * BLIT_SURFACE_TO_SCREEN
685 *
686 * New guarantees:
687 *
688 * - The host will not read or write guest memory, including the GFB,
689 * except when explicitly initiated by a DMA command.
690 *
691 * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
692 * is guaranteed to complete before any subsequent FENCEs.
693 *
694 * - All legacy commands which affect a Screen (UPDATE, PRESENT,
695 * PRESENT_READBACK) as well as new Screen blit commands will
696 * all behave consistently as blits, and memory will be read
697 * or written in FIFO order.
698 *
699 * For example, if you PRESENT from one SVGA3D surface to multiple
700 * places on the screen, the data copied will always be from the
701 * SVGA3D surface at the time the PRESENT was issued in the FIFO.
702 * This was not necessarily true on devices without Screen Object.
703 *
704 * This means that on devices that support Screen Object, the
705 * PRESENT_READBACK command should not be necessary unless you
706 * actually want to read back the results of 3D rendering into
707 * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
708 * command provides a strict superset of functionality.)
709 *
710 * - When a screen is resized, either using Screen Object commands or
711 * legacy multimon registers, its contents are preserved.
712 */
713
714#define SVGA_FIFO_CAP_NONE 0
715#define SVGA_FIFO_CAP_FENCE (1<<0)
716#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
717#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
718#define SVGA_FIFO_CAP_VIDEO (1<<3)
719#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
720#define SVGA_FIFO_CAP_ESCAPE (1<<5)
721#define SVGA_FIFO_CAP_RESERVE (1<<6)
722#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
723
724
725/*
726 * FIFO Flags
727 *
728 * Accel Front -- Driver should use front buffer only commands
729 */
730
731#define SVGA_FIFO_FLAG_NONE 0
732#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
733#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
734
735/*
736 * FIFO reservation sentinel value
737 */
738
739#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
740
741
742/*
743 * Video overlay support
744 */
745
746#define SVGA_NUM_OVERLAY_UNITS 32
747
748
749/*
750 * Video capabilities that the guest is currently using
751 */
752
753#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
754
755
756/*
757 * Offsets for the video overlay registers
758 */
759
760enum {
761 SVGA_VIDEO_ENABLED = 0,
762 SVGA_VIDEO_FLAGS,
763 SVGA_VIDEO_DATA_OFFSET,
764 SVGA_VIDEO_FORMAT,
765 SVGA_VIDEO_COLORKEY,
766 SVGA_VIDEO_SIZE, // Deprecated
767 SVGA_VIDEO_WIDTH,
768 SVGA_VIDEO_HEIGHT,
769 SVGA_VIDEO_SRC_X,
770 SVGA_VIDEO_SRC_Y,
771 SVGA_VIDEO_SRC_WIDTH,
772 SVGA_VIDEO_SRC_HEIGHT,
773 SVGA_VIDEO_DST_X, // Signed int32
774 SVGA_VIDEO_DST_Y, // Signed int32
775 SVGA_VIDEO_DST_WIDTH,
776 SVGA_VIDEO_DST_HEIGHT,
777 SVGA_VIDEO_PITCH_1,
778 SVGA_VIDEO_PITCH_2,
779 SVGA_VIDEO_PITCH_3,
780 SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
781 SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
782 SVGA_VIDEO_NUM_REGS
783};
784
785
786/*
787 * SVGA Overlay Units
788 *
789 * width and height relate to the entire source video frame.
791 * srcX, srcY, srcWidth and srcHeight represent a subset of the source
791 * video frame to be displayed.
792 */
793
794typedef struct SVGAOverlayUnit {
795 uint32 enabled;
796 uint32 flags;
797 uint32 dataOffset;
798 uint32 format;
799 uint32 colorKey;
800 uint32 size;
801 uint32 width;
802 uint32 height;
803 uint32 srcX;
804 uint32 srcY;
805 uint32 srcWidth;
806 uint32 srcHeight;
807 int32 dstX;
808 int32 dstY;
809 uint32 dstWidth;
810 uint32 dstHeight;
811 uint32 pitches[3];
812 uint32 dataGMRId;
813 uint32 dstScreenId;
814} SVGAOverlayUnit;
815
816
817/*
818 * SVGAScreenObject --
819 *
820 * This is a new way to represent a guest's multi-monitor screen or
821 * Unity window. Screen objects are only supported if the
822 * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
823 *
824 * If Screen Objects are supported, they can be used to fully
825 * replace the functionality provided by the framebuffer registers
826 * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
827 *
828 * The screen object is a struct with guaranteed binary
829 * compatibility. New flags can be added, and the struct may grow,
830 * but existing fields must retain their meaning.
831 *
832 */
833
834#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
835#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
836#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
837
838typedef
839struct SVGAScreenObject {
840 uint32 structSize; // sizeof(SVGAScreenObject)
841 uint32 id;
842 uint32 flags;
843 struct {
844 uint32 width;
845 uint32 height;
846 } size;
847 struct {
848 int32 x;
849 int32 y;
850 } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
851} SVGAScreenObject;
852
853
854/*
855 * Commands in the command FIFO:
856 *
857 * Command IDs defined below are used for the traditional 2D FIFO
858 * communication (not all commands are available for all versions of the
859 * SVGA FIFO protocol).
860 *
861 * Note the holes in the command ID numbers: These commands have been
862 * deprecated, and the old IDs must not be reused.
863 *
864 * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
865 * protocol.
866 *
867 * Each command's parameters are described by the comments and
868 * structs below.
869 */
870
871typedef enum {
872 SVGA_CMD_INVALID_CMD = 0,
873 SVGA_CMD_UPDATE = 1,
874 SVGA_CMD_RECT_COPY = 3,
875 SVGA_CMD_DEFINE_CURSOR = 19,
876 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
877 SVGA_CMD_UPDATE_VERBOSE = 25,
878 SVGA_CMD_FRONT_ROP_FILL = 29,
879 SVGA_CMD_FENCE = 30,
880 SVGA_CMD_ESCAPE = 33,
881 SVGA_CMD_DEFINE_SCREEN = 34,
882 SVGA_CMD_DESTROY_SCREEN = 35,
883 SVGA_CMD_DEFINE_GMRFB = 36,
884 SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
885 SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
886 SVGA_CMD_ANNOTATION_FILL = 39,
887 SVGA_CMD_ANNOTATION_COPY = 40,
888 SVGA_CMD_MAX
889} SVGAFifoCmdId;
890
891#define SVGA_CMD_MAX_ARGS 64
892
893
894/*
895 * SVGA_CMD_UPDATE --
896 *
897 * This is a DMA transfer which copies from the Guest Framebuffer
898 * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
899 * intersect with the provided virtual rectangle.
900 *
901 * This command does not support using arbitrary guest memory as a
902 * data source; it only works with the pre-defined GFB memory.
903 * This command also does not support signed virtual coordinates.
904 * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
905 * negative root x/y coordinates, the negative portion of those
906 * screens will not be reachable by this command.
907 *
908 * This command is not necessary when using framebuffer
909 * traces. Traces are automatically enabled if the SVGA FIFO is
910 * disabled, and you may explicitly enable/disable traces using
911 * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
912 * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
913 *
914 * Traces and SVGA_CMD_UPDATE are the only supported ways to render
915 * pseudocolor screen updates. The newer Screen Object commands
916 * only support true color formats.
917 *
918 * Availability:
919 * Always available.
920 */
921
922typedef
923struct {
924 uint32 x;
925 uint32 y;
926 uint32 width;
927 uint32 height;
928} SVGAFifoCmdUpdate;
929
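/*
 * Illustrative sketch (not part of the device headers): emitting an
 * UPDATE for a dirty rectangle. A 2D command is a 32-bit SVGAFifoCmdId
 * followed by its body; fifo_reserve()/fifo_commit() are hypothetical
 * FIFO accessors (a real driver must handle wrap-around and, with
 * SVGA_FIFO_CAP_RESERVE, the reservation protocol).
 */

extern void *fifo_reserve(uint32 cmdId, uint32 bodySize); /* hypothetical */
extern void fifo_commit(uint32 bytes);                    /* hypothetical */

static inline void
svga_emit_update(uint32 x, uint32 y, uint32 width, uint32 height)
{
   SVGAFifoCmdUpdate *cmd = fifo_reserve(SVGA_CMD_UPDATE, sizeof *cmd);

   cmd->x = x;
   cmd->y = y;
   cmd->width = width;
   cmd->height = height;
   fifo_commit(sizeof *cmd);
}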
930
931/*
932 * SVGA_CMD_RECT_COPY --
933 *
934 * Perform a rectangular DMA transfer from one area of the GFB to
935 * another, and copy the result to any screens which intersect it.
936 *
937 * Availability:
938 * SVGA_CAP_RECT_COPY
939 */
940
941typedef
942struct {
943 uint32 srcX;
944 uint32 srcY;
945 uint32 destX;
946 uint32 destY;
947 uint32 width;
948 uint32 height;
949} SVGAFifoCmdRectCopy;
950
951
952/*
953 * SVGA_CMD_DEFINE_CURSOR --
954 *
955 * Provide a new cursor image, as an AND/XOR mask.
956 *
957 * The recommended way to position the cursor overlay is by using
958 * the SVGA_FIFO_CURSOR_* registers, supported by the
959 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
960 *
961 * Availability:
962 * SVGA_CAP_CURSOR
963 */
964
965typedef
966struct {
967 uint32 id; // Reserved, must be zero.
968 uint32 hotspotX;
969 uint32 hotspotY;
970 uint32 width;
971 uint32 height;
972 uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
973 uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
974 /*
975 * Followed by scanline data for AND mask, then XOR mask.
976 * Each scanline is padded to a 32-bit boundary.
977 */
978} SVGAFifoCmdDefineCursor;
979
980
981/*
982 * SVGA_CMD_DEFINE_ALPHA_CURSOR --
983 *
984 * Provide a new cursor image, in 32-bit BGRA format.
985 *
986 * The recommended way to position the cursor overlay is by using
987 * the SVGA_FIFO_CURSOR_* registers, supported by the
988 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
989 *
990 * Availability:
991 * SVGA_CAP_ALPHA_CURSOR
992 */
993
994typedef
995struct {
996 uint32 id; // Reserved, must be zero.
997 uint32 hotspotX;
998 uint32 hotspotY;
999 uint32 width;
1000 uint32 height;
1001 /* Followed by scanline data */
1002} SVGAFifoCmdDefineAlphaCursor;
1003
1004
1005/*
1006 * SVGA_CMD_UPDATE_VERBOSE --
1007 *
1008 * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
1009 * 'reason' value, an opaque cookie which is used by internal
1010 * debugging tools. Third party drivers should not use this
1011 * command.
1012 *
1013 * Availability:
1014 * SVGA_CAP_EXTENDED_FIFO
1015 */
1016
1017typedef
1018struct {
1019 uint32 x;
1020 uint32 y;
1021 uint32 width;
1022 uint32 height;
1023 uint32 reason;
1024} SVGAFifoCmdUpdateVerbose;
1025
1026
1027/*
1028 * SVGA_CMD_FRONT_ROP_FILL --
1029 *
1030 * This is a hint which tells the SVGA device that the driver has
1031 * just filled a rectangular region of the GFB with a solid
1032 * color. Instead of reading these pixels from the GFB, the device
1033 * can assume that they all equal 'color'. This is primarily used
1034 * for remote desktop protocols.
1035 *
1036 * Availability:
1037 * SVGA_FIFO_CAP_ACCELFRONT
1038 */
1039
1040#define SVGA_ROP_COPY 0x03
1041
1042typedef
1043struct {
1044 uint32 color; // In the same format as the GFB
1045 uint32 x;
1046 uint32 y;
1047 uint32 width;
1048 uint32 height;
1049 uint32 rop; // Must be SVGA_ROP_COPY
1050} SVGAFifoCmdFrontRopFill;
1051
1052
1053/*
1054 * SVGA_CMD_FENCE --
1055 *
1056 * Insert a synchronization fence. When the SVGA device reaches
1057 * this command, it will copy the 'fence' value into the
1058 * SVGA_FIFO_FENCE register. It will also compare the fence against
1059 * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
1060 * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
1061 * raise this interrupt.
1062 *
1063 * Availability:
1064 * SVGA_FIFO_FENCE for this command,
1065 * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
1066 */
1067
1068typedef
1069struct {
1070 uint32 fence;
1071} SVGAFifoCmdFence;
1072
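/*
 * Illustrative sketch (not part of the device headers): inserting a
 * fence and waiting for it, using the hypothetical FIFO accessors
 * declared earlier in this file. The spin compares with a signed
 * difference so that fences already passed (including across 32-bit
 * wrap-around) terminate the wait; a real driver would sleep on
 * SVGA_IRQFLAG_ANY_FENCE or the FENCE_GOAL interrupt instead.
 */
static inline void
svga_insert_fence(uint32 fenceValue)
{
   SVGAFifoCmdFence *cmd = fifo_reserve(SVGA_CMD_FENCE, sizeof *cmd);

   cmd->fence = fenceValue;
   fifo_commit(sizeof *cmd);
}

static inline void
svga_wait_for_fence(const volatile uint32 *fifoMem, uint32 fenceValue)
{
   /* The device copies each passed fence into SVGA_FIFO_FENCE. */
   while ((int32)(fifoMem[SVGA_FIFO_FENCE] - fenceValue) < 0)
      ; /* spin */
}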
1073
1074/*
1075 * SVGA_CMD_ESCAPE --
1076 *
1077 * Send an extended or vendor-specific variable length command.
1078 * This is used for video overlay, third party plugins, and
1079 * internal debugging tools. See svga_escape.h
1080 *
1081 * Availability:
1082 * SVGA_FIFO_CAP_ESCAPE
1083 */
1084
1085typedef
1086struct {
1087 uint32 nsid;
1088 uint32 size;
1089 /* followed by 'size' bytes of data */
1090} SVGAFifoCmdEscape;
1091
1092
1093/*
1094 * SVGA_CMD_DEFINE_SCREEN --
1095 *
1096 * Define or redefine an SVGAScreenObject. See the description of
1097 * SVGAScreenObject above. The video driver is responsible for
1098 * generating new screen IDs. They should be small nonnegative
1099 * integers. The virtual device will have an implementation
1100 * specific upper limit on the number of screen IDs
1101 * supported. Drivers are responsible for recycling IDs. The first
1102 * valid ID is zero.
1103 *
1104 * - Interaction with other registers:
1105 *
1106 * For backwards compatibility, when the GFB mode registers (WIDTH,
1107 * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
1108 * deletes all screens other than screen #0, and redefines screen
1109 * #0 according to the specified mode. Drivers that use
1110 * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
1111 *
1112 * If you use screen objects, do not use the legacy multi-mon
1113 * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
1114 *
1115 * Availability:
1116 * SVGA_FIFO_CAP_SCREEN_OBJECT
1117 */
1118
1119typedef
1120struct {
1121 SVGAScreenObject screen; // Variable-length according to version
1122} SVGAFifoCmdDefineScreen;
1123
1124
1125/*
1126 * SVGA_CMD_DESTROY_SCREEN --
1127 *
1128 * Destroy an SVGAScreenObject. Its ID is immediately available for
1129 * re-use.
1130 *
1131 * Availability:
1132 * SVGA_FIFO_CAP_SCREEN_OBJECT
1133 */
1134
1135typedef
1136struct {
1137 uint32 screenId;
1138} SVGAFifoCmdDestroyScreen;
1139
1140
1141/*
1142 * SVGA_CMD_DEFINE_GMRFB --
1143 *
1144 * This command sets a piece of SVGA device state called the
1145 * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
1146 * piece of light-weight state which identifies the location and
1147 * format of an image in guest memory or in BAR1. The GMRFB has
1148 * an arbitrary size, and it doesn't need to match the geometry
1149 * of the GFB or any screen object.
1150 *
1151 * The GMRFB can be redefined as often as you like. You could
1152 * always use the same GMRFB, you could redefine it before
1153 * rendering from a different guest screen, or you could even
1154 * redefine it before every blit.
1155 *
1156 * There are multiple ways to use this command. The simplest way is
1157 * to use it to move the framebuffer either to elsewhere in the GFB
1158 * (BAR1) memory region, or to a user-defined GMR. This lets a
1159 * driver use a framebuffer allocated entirely out of normal system
1160 * memory, which we encourage.
1161 *
1162 * Another way to use this command is to set up a ring buffer of
1163 * updates in GFB memory. If a driver wants to ensure that no
1164 * frames are skipped by the SVGA device, it is important that the
1165 * driver not modify the source data for a blit until the device is
1166 * done processing the command. One efficient way to accomplish
1167 * this is to use a ring of small DMA buffers. Each buffer is used
1168 * for one blit, then we move on to the next buffer in the
1169 * ring. The FENCE mechanism is used to protect each buffer from
1170 * re-use until the device is finished with that buffer's
1171 * corresponding blit.
1172 *
1173 * This command does not affect the meaning of SVGA_CMD_UPDATE.
1174 * UPDATEs always occur from the legacy GFB memory area. This
1175 * command has no support for pseudocolor GMRFBs. Currently only
1176 * true-color 15, 16, and 24-bit depths are supported. Future
1177 * devices may expose capabilities for additional framebuffer
1178 * formats.
1179 *
1180 * The default GMRFB value is undefined. Drivers must always send
1181 * this command at least once before performing any blit from the
1182 * GMRFB.
1183 *
1184 * Availability:
1185 * SVGA_FIFO_CAP_SCREEN_OBJECT
1186 */
1187
1188typedef
1189struct {
1190 SVGAGuestPtr ptr;
1191 uint32 bytesPerLine;
1192 SVGAGMRImageFormat format;
1193} SVGAFifoCmdDefineGMRFB;
1194
1195
1196/*
1197 * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
1198 *
1199 * This is a guest-to-host blit. It performs a DMA operation to
1200 * copy a rectangular region of pixels from the current GMRFB to
1201 * one or more Screen Objects.
1202 *
1203 * The destination coordinate may be specified relative to a
1204 * screen's origin (if a screen ID is specified) or relative to the
1205 * virtual coordinate system's origin (if the screen ID is
1206 * SVGA_ID_INVALID). The actual destination may span zero or more
1207 * screens, in the case of a virtual destination rect or a rect
1208 * which extends off the edge of the specified screen.
1209 *
1210 * This command writes to the screen's "base layer": the underlying
1211 * framebuffer which exists below any cursor or video overlays. No
1212 * action is necessary to explicitly hide or update any overlays
1213 * which exist on top of the updated region.
1214 *
1215 * The SVGA device is guaranteed to finish reading from the GMRFB
1216 * by the time any subsequent FENCE commands are reached.
1217 *
1218 * This command consumes an annotation. See the
1219 * SVGA_CMD_ANNOTATION_* commands for details.
1220 *
1221 * Availability:
1222 * SVGA_FIFO_CAP_SCREEN_OBJECT
1223 */
1224
1225typedef
1226struct {
1227 SVGASignedPoint srcOrigin;
1228 SVGASignedRect destRect;
1229 uint32 destScreenId;
1230} SVGAFifoCmdBlitGMRFBToScreen;
1231
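/*
 * Illustrative sketch (not part of the device headers): pointing the
 * GMRFB at a 32bpp image in a user-defined GMR and blitting part of it
 * to screen 0, using the hypothetical FIFO accessors declared earlier
 * in this file.
 */
static inline void
svga_present_from_gmr(uint32 gmrId, uint32 offset, uint32 bytesPerLine,
                      const SVGASignedRect *rect)
{
   SVGAFifoCmdDefineGMRFB *def;
   SVGAFifoCmdBlitGMRFBToScreen *blit;

   def = fifo_reserve(SVGA_CMD_DEFINE_GMRFB, sizeof *def);
   def->ptr.gmrId = gmrId;
   def->ptr.offset = offset;
   def->bytesPerLine = bytesPerLine;
   def->format.bitsPerPixel = 32;    /* 32-bit BGRX */
   def->format.colorDepth = 24;
   def->format.reserved = 0;
   fifo_commit(sizeof *def);

   blit = fifo_reserve(SVGA_CMD_BLIT_GMRFB_TO_SCREEN, sizeof *blit);
   blit->srcOrigin.x = rect->left;   /* sample the GMRFB at the same spot */
   blit->srcOrigin.y = rect->top;
   blit->destRect = *rect;
   blit->destScreenId = 0;           /* or SVGA_ID_INVALID for virt. coords */
   fifo_commit(sizeof *blit);
}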
1232
1233/*
1234 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
1235 *
1236 * This is a host-to-guest blit. It performs a DMA operation to
1237 * copy a rectangular region of pixels from a single Screen Object
1238 * back to the current GMRFB.
1239 *
1240 * Usage note: This command should be used rarely. It will
1241 * typically be inefficient, but it is necessary for some types of
1242 * synchronization between 3D (GPU) and 2D (CPU) rendering into
1243 * overlapping areas of a screen.
1244 *
1245 * The source coordinate is specified relative to a screen's
1246 * origin. The provided screen ID must be valid. If any parameters
1247 * are invalid, the resulting pixel values are undefined.
1248 *
1249 * This command reads the screen's "base layer". Overlays like
1250 * video and cursor are not included, but any data which was sent
1251 * using a blit-to-screen primitive will be available, no matter
1252 * whether the data's original source was the GMRFB or the 3D
1253 * acceleration hardware.
1254 *
1255 * Note that our guest-to-host blits and host-to-guest blits aren't
1256 * symmetric in their current implementation. While the parameters
1257 * are identical, host-to-guest blits are a lot less featureful.
1258 * They do not support clipping: If the source parameters don't
1259 * fully fit within a screen, the blit fails. They must originate
1260 * from exactly one screen. Virtual coordinates are not directly
1261 * supported.
1262 *
1263 * Host-to-guest blits do support the same set of GMRFB formats
1264 * offered by guest-to-host blits.
1265 *
1266 * The SVGA device is guaranteed to finish writing to the GMRFB by
1267 * the time any subsequent FENCE commands are reached.
1268 *
1269 * Availability:
1270 * SVGA_FIFO_CAP_SCREEN_OBJECT
1271 */
1272
1273typedef
1274struct {
1275 SVGASignedPoint destOrigin;
1276 SVGASignedRect srcRect;
1277 uint32 srcScreenId;
1278} SVGAFifoCmdBlitScreenToGMRFB;
1279
1280
1281/*
1282 * SVGA_CMD_ANNOTATION_FILL --
1283 *
1284 * This is a blit annotation. This command stores a small piece of
1285 * device state which is consumed by the next blit-to-screen
1286 * command. The state is only cleared by commands which are
1287 * specifically documented as consuming an annotation. Other
1288 * commands (such as ESCAPEs for debugging) may intervene between
1289 * the annotation and its associated blit.
1290 *
1291 * This annotation is a promise about the contents of the next
1292 * blit: The video driver is guaranteeing that all pixels in that
1293 * blit will have the same value, specified here as a color in
1294 * SVGAColorBGRX format.
1295 *
1296 * The SVGA device can still render the blit correctly even if it
1297 * ignores this annotation, but the annotation may allow it to
1298 * perform the blit more efficiently, for example by ignoring the
1299 * source data and performing a fill in hardware.
1300 *
1301 * This annotation is most important for performance when the
1302 * user's display is being remoted over a network connection.
1303 *
1304 * Availability:
1305 * SVGA_FIFO_CAP_SCREEN_OBJECT
1306 */
1307
1308typedef
1309struct {
1310 SVGAColorBGRX color;
1311} SVGAFifoCmdAnnotationFill;
1312
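/*
 * [Editor's sketch -- not part of this patch.] An annotation is emitted
 * immediately before the blit it describes; here the driver promises
 * that the next blit is a solid fill, so the device may skip reading
 * the source data entirely. This assumes SVGAColorBGRX exposes a raw
 * 32-bit 'value' member (see its definition in svga_reg.h); the color
 * is made up.
 */
static int example_annotated_fill(struct vmw_private *dev_priv)
{
	struct {
		uint32 cmd_id;
		SVGAFifoCmdAnnotationFill body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->cmd_id = SVGA_CMD_ANNOTATION_FILL;
	cmd->body.color.value = 0x00ff0000;	/* solid red in BGRX */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* The next blit-to-screen command consumes the annotation. */
	return example_blit_gmrfb_to_screen(dev_priv);
}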
1313
1314/*
1315 * SVGA_CMD_ANNOTATION_COPY --
1316 *
1317 * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
1318 * information about annotations.
1319 *
1320 * This annotation is a promise about the contents of the next
1321 * blit: The video driver is guaranteeing that all pixels in that
1322 * blit will have the same value as those which already exist at an
1323 * identically-sized region on the same or a different screen.
1324 *
1325 * Note that the source pixels for the COPY in this annotation are
1326 * sampled before applying the annotation's associated blit. They
1327 * are allowed to overlap with the blit's destination pixels.
1328 *
1329 * The copy source rectangle is specified the same way as the blit
1330 * destination: it can be a rectangle which spans zero or more
1331 * screens, specified relative to either a screen or to the virtual
1332 * coordinate system's origin. If the source rectangle includes
1333 * pixels which are not from exactly one screen, the results are
1334 * undefined.
1335 *
1336 * Availability:
1337 * SVGA_FIFO_CAP_SCREEN_OBJECT
1338 */
1339
1340typedef
1341struct {
1342 SVGASignedPoint srcOrigin;
1343 uint32 srcScreenId;
1344} SVGAFifoCmdAnnotationCopy;
1345
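/*
 * [Editor's sketch -- not part of this patch.] The copy annotation fits
 * scrolling-style updates, where the next blit's pixels already exist
 * elsewhere on a screen. Because the source is sampled before the
 * associated blit is applied, overlapping source and destination are
 * fine. All names and values below are made up.
 */
static void example_annotate_scroll(struct vmw_private *dev_priv,
				    int32 src_x, int32 src_y)
{
	struct {
		uint32 cmd_id;
		SVGAFifoCmdAnnotationCopy body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->cmd_id = SVGA_CMD_ANNOTATION_COPY;
	cmd->body.srcOrigin.x = src_x;	/* where the identical pixels live */
	cmd->body.srcOrigin.y = src_y;
	cmd->body.srcScreenId = 0;	/* same or another valid screen */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* ...followed immediately by the blit this annotation describes. */
}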
1346#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 000000000000..55836dedcfc2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * Silly typedefs for the svga headers. Currently the headers are shared
30 * between all components that talk to svga. And as such the headers
31 * are in a completely different style and use weird defines.
32 *
33 * This file lets all the ugliness be prefixed with svga*.
34 */
35
36#ifndef _SVGA_TYPES_H_
37#define _SVGA_TYPES_H_
38
39typedef uint16_t uint16;
40typedef uint32_t uint32;
41typedef uint8_t uint8;
42typedef int32_t int32;
43typedef bool Bool;
44
45#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 000000000000..d6f2d2b882e9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,229 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31
32static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
33 TTM_PL_FLAG_CACHED;
34
35static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
36 TTM_PL_FLAG_CACHED |
37 TTM_PL_FLAG_NO_EVICT;
38
39static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
40 TTM_PL_FLAG_CACHED;
41
42struct ttm_placement vmw_vram_placement = {
43 .fpfn = 0,
44 .lpfn = 0,
45 .num_placement = 1,
46 .placement = &vram_placement_flags,
47 .num_busy_placement = 1,
48 .busy_placement = &vram_placement_flags
49};
50
51struct ttm_placement vmw_vram_ne_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_ne_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &vram_ne_placement_flags
58};
59
60struct ttm_placement vmw_sys_placement = {
61 .fpfn = 0,
62 .lpfn = 0,
63 .num_placement = 1,
64 .placement = &sys_placement_flags,
65 .num_busy_placement = 1,
66 .busy_placement = &sys_placement_flags
67};
68
69struct vmw_ttm_backend {
70 struct ttm_backend backend;
71};
72
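/*
 * Editor's note (not in the original patch): the backend hooks below are
 * deliberately no-ops. The virtual device reaches guest memory through
 * GMRs, which this driver binds on its own (vmw_gmr_bind() in
 * vmwgfx_gmr.c) during command submission, so TTM's per-object
 * populate/bind/unbind steps have nothing to do here.
 */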
73static int vmw_ttm_populate(struct ttm_backend *backend,
74 unsigned long num_pages, struct page **pages,
75 struct page *dummy_read_page)
76{
77 return 0;
78}
79
80static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
81{
82 return 0;
83}
84
85static int vmw_ttm_unbind(struct ttm_backend *backend)
86{
87 return 0;
88}
89
90static void vmw_ttm_clear(struct ttm_backend *backend)
91{
92}
93
94static void vmw_ttm_destroy(struct ttm_backend *backend)
95{
96 struct vmw_ttm_backend *vmw_be =
97 container_of(backend, struct vmw_ttm_backend, backend);
98
99 kfree(vmw_be);
100}
101
102static struct ttm_backend_func vmw_ttm_func = {
103 .populate = vmw_ttm_populate,
104 .clear = vmw_ttm_clear,
105 .bind = vmw_ttm_bind,
106 .unbind = vmw_ttm_unbind,
107 .destroy = vmw_ttm_destroy,
108};
109
110struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
111{
112 struct vmw_ttm_backend *vmw_be;
113
114 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
115 if (!vmw_be)
116 return NULL;
117
118 vmw_be->backend.func = &vmw_ttm_func;
119
120 return &vmw_be->backend;
121}
122
123int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
124{
125 return 0;
126}
127
128int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
129 struct ttm_mem_type_manager *man)
130{
131 struct vmw_private *dev_priv =
132 container_of(bdev, struct vmw_private, bdev);
133
134 switch (type) {
135 case TTM_PL_SYSTEM:
136 /* System memory */
137
138 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
139 man->available_caching = TTM_PL_MASK_CACHING;
140 man->default_caching = TTM_PL_FLAG_CACHED;
141 break;
142 case TTM_PL_VRAM:
143 /* "On-card" video ram */
144 man->gpu_offset = 0;
145 man->io_offset = dev_priv->vram_start;
146 man->io_size = dev_priv->vram_size;
147 man->flags = TTM_MEMTYPE_FLAG_FIXED |
148 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
149 man->io_addr = NULL;
150 man->available_caching = TTM_PL_MASK_CACHING;
151 man->default_caching = TTM_PL_FLAG_WC;
152 break;
153 default:
154 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
155 return -EINVAL;
156 }
157 return 0;
158}
159
160void vmw_evict_flags(struct ttm_buffer_object *bo,
161 struct ttm_placement *placement)
162{
163 *placement = vmw_sys_placement;
164}
165
166/**
167 * FIXME: Proper access checks on buffers.
168 */
169
170static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
171{
172 return 0;
173}
174
175/**
176 * FIXME: We're using the old vmware polling method to sync.
177 * Do this with fences instead.
178 */
179
180static void *vmw_sync_obj_ref(void *sync_obj)
181{
182 return sync_obj;
183}
184
185static void vmw_sync_obj_unref(void **sync_obj)
186{
187 *sync_obj = NULL;
188}
189
190static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
191{
192 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
193
194 mutex_lock(&dev_priv->hw_mutex);
195 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
196 mutex_unlock(&dev_priv->hw_mutex);
197 return 0;
198}
199
200static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
201{
202 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
203 uint32_t sequence = (unsigned long) sync_obj;
204
205 return vmw_fence_signaled(dev_priv, sequence);
206}
207
208static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
209 bool lazy, bool interruptible)
210{
211 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
212 uint32_t sequence = (unsigned long) sync_obj;
213
214 return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
215}
216
217struct ttm_bo_driver vmw_bo_driver = {
218 .create_ttm_backend_entry = vmw_ttm_backend_init,
219 .invalidate_caches = vmw_invalidate_caches,
220 .init_mem_type = vmw_init_mem_type,
221 .evict_flags = vmw_evict_flags,
222 .move = NULL,
223 .verify_access = vmw_verify_access,
224 .sync_obj_signaled = vmw_sync_obj_signaled,
225 .sync_obj_wait = vmw_sync_obj_wait,
226 .sync_obj_flush = vmw_sync_obj_flush,
227 .sync_obj_unref = vmw_sync_obj_unref,
228 .sync_obj_ref = vmw_sync_obj_ref
229};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 000000000000..7b48bb3b63b2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,735 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30#include "ttm/ttm_placement.h"
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_object.h"
33#include "ttm/ttm_module.h"
34
35#define VMWGFX_DRIVER_NAME "vmwgfx"
36#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
37#define VMWGFX_CHIP_SVGAII 0
38#define VMW_FB_RESERVATION 0
39
40/**
41 * Fully encoded drm commands. Might move to vmw_drm.h
42 */
43
44#define DRM_IOCTL_VMW_GET_PARAM \
45 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
46 struct drm_vmw_getparam_arg)
47#define DRM_IOCTL_VMW_ALLOC_DMABUF \
48 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
49 union drm_vmw_alloc_dmabuf_arg)
50#define DRM_IOCTL_VMW_UNREF_DMABUF \
51 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
52 struct drm_vmw_unref_dmabuf_arg)
53#define DRM_IOCTL_VMW_CURSOR_BYPASS \
54 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
55 struct drm_vmw_cursor_bypass_arg)
56
57#define DRM_IOCTL_VMW_CONTROL_STREAM \
58 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
59 struct drm_vmw_control_stream_arg)
60#define DRM_IOCTL_VMW_CLAIM_STREAM \
61 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
62 struct drm_vmw_stream_arg)
63#define DRM_IOCTL_VMW_UNREF_STREAM \
64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
65 struct drm_vmw_stream_arg)
66
67#define DRM_IOCTL_VMW_CREATE_CONTEXT \
68 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
69 struct drm_vmw_context_arg)
70#define DRM_IOCTL_VMW_UNREF_CONTEXT \
71 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
72 struct drm_vmw_context_arg)
73#define DRM_IOCTL_VMW_CREATE_SURFACE \
74 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
75 union drm_vmw_surface_create_arg)
76#define DRM_IOCTL_VMW_UNREF_SURFACE \
77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
78 struct drm_vmw_surface_arg)
79#define DRM_IOCTL_VMW_REF_SURFACE \
80 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
81 union drm_vmw_surface_reference_arg)
82#define DRM_IOCTL_VMW_EXECBUF \
83 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
84 struct drm_vmw_execbuf_arg)
85#define DRM_IOCTL_VMW_FIFO_DEBUG \
86 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
87 struct drm_vmw_fifo_debug_arg)
88#define DRM_IOCTL_VMW_FENCE_WAIT \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
90 struct drm_vmw_fence_wait_arg)
91
92
93/**
94 * The core DRM version of this macro doesn't account for
95 * DRM_COMMAND_BASE.
96 */
97
98#define VMW_IOCTL_DEF(ioctl, func, flags) \
99 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
100
101/**
102 * Ioctl definitions.
103 */
104
105static struct drm_ioctl_desc vmw_ioctls[] = {
106 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0),
107 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
108 0),
109 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
110 0),
111 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
112 vmw_kms_cursor_bypass_ioctl, 0),
113
114 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
115 0),
116 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
117 0),
118 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
119 0),
120
121 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
122 0),
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
124 0),
125 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
126 0),
127 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
128 0),
129 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
130 0),
131 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
132 0),
133 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
134 0),
135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
136 0)
137};
138
139static struct pci_device_id vmw_pci_id_list[] = {
140 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
141 {0, 0, 0}
142};
143
144static char *vmw_devname = "vmwgfx";
145
146static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
147static void vmw_master_init(struct vmw_master *);
148
149static void vmw_print_capabilities(uint32_t capabilities)
150{
151 DRM_INFO("Capabilities:\n");
152 if (capabilities & SVGA_CAP_RECT_COPY)
153 DRM_INFO(" Rect copy.\n");
154 if (capabilities & SVGA_CAP_CURSOR)
155 DRM_INFO(" Cursor.\n");
156 if (capabilities & SVGA_CAP_CURSOR_BYPASS)
157 DRM_INFO(" Cursor bypass.\n");
158 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
159 DRM_INFO(" Cursor bypass 2.\n");
160 if (capabilities & SVGA_CAP_8BIT_EMULATION)
161 DRM_INFO(" 8bit emulation.\n");
162 if (capabilities & SVGA_CAP_ALPHA_CURSOR)
163 DRM_INFO(" Alpha cursor.\n");
164 if (capabilities & SVGA_CAP_3D)
165 DRM_INFO(" 3D.\n");
166 if (capabilities & SVGA_CAP_EXTENDED_FIFO)
167 DRM_INFO(" Extended Fifo.\n");
168 if (capabilities & SVGA_CAP_MULTIMON)
169 DRM_INFO(" Multimon.\n");
170 if (capabilities & SVGA_CAP_PITCHLOCK)
171 DRM_INFO(" Pitchlock.\n");
172 if (capabilities & SVGA_CAP_IRQMASK)
173 DRM_INFO(" Irq mask.\n");
174 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
175 DRM_INFO(" Display Topology.\n");
176 if (capabilities & SVGA_CAP_GMR)
177 DRM_INFO(" GMR.\n");
178 if (capabilities & SVGA_CAP_TRACES)
179 DRM_INFO(" Traces.\n");
180}
181
182static int vmw_request_device(struct vmw_private *dev_priv)
183{
184 int ret;
185
186 vmw_kms_save_vga(dev_priv);
187
188 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
189 if (unlikely(ret != 0)) {
190 DRM_ERROR("Unable to initialize FIFO.\n");
191 return ret;
192 }
193
194 return 0;
195}
196
197static void vmw_release_device(struct vmw_private *dev_priv)
198{
199 vmw_fifo_release(dev_priv, &dev_priv->fifo);
200 vmw_kms_restore_vga(dev_priv);
201}
202
203
204static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
205{
206 struct vmw_private *dev_priv;
207 int ret;
208
209 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
210 if (unlikely(dev_priv == NULL)) {
211 DRM_ERROR("Failed allocating a device private struct.\n");
212 return -ENOMEM;
213 }
214 memset(dev_priv, 0, sizeof(*dev_priv));
215
216 dev_priv->dev = dev;
217 dev_priv->vmw_chipset = chipset;
218 mutex_init(&dev_priv->hw_mutex);
219 mutex_init(&dev_priv->cmdbuf_mutex);
220 rwlock_init(&dev_priv->resource_lock);
221 idr_init(&dev_priv->context_idr);
222 idr_init(&dev_priv->surface_idr);
223 idr_init(&dev_priv->stream_idr);
224 ida_init(&dev_priv->gmr_ida);
225 mutex_init(&dev_priv->init_mutex);
226 init_waitqueue_head(&dev_priv->fence_queue);
227 init_waitqueue_head(&dev_priv->fifo_queue);
228 atomic_set(&dev_priv->fence_queue_waiters, 0);
229 atomic_set(&dev_priv->fifo_queue_waiters, 0);
230 INIT_LIST_HEAD(&dev_priv->gmr_lru);
231
232 dev_priv->io_start = pci_resource_start(dev->pdev, 0);
233 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
234 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
235
236 mutex_lock(&dev_priv->hw_mutex);
237 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
238
239 if (dev_priv->capabilities & SVGA_CAP_GMR) {
240 dev_priv->max_gmr_descriptors =
241 vmw_read(dev_priv,
242 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
243 dev_priv->max_gmr_ids =
244 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
245 }
246
247 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
248 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
249 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
250 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
251
252 mutex_unlock(&dev_priv->hw_mutex);
253
254 vmw_print_capabilities(dev_priv->capabilities);
255
256 if (dev_priv->capabilities & SVGA_CAP_GMR) {
257 DRM_INFO("Max GMR ids is %u\n",
258 (unsigned)dev_priv->max_gmr_ids);
259 DRM_INFO("Max GMR descriptors is %u\n",
260 (unsigned)dev_priv->max_gmr_descriptors);
261 }
262 DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
263 dev_priv->vram_start, dev_priv->vram_size / 1024);
264 DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
265 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
266
267 ret = vmw_ttm_global_init(dev_priv);
268 if (unlikely(ret != 0))
269 goto out_err0;
270
271
272 vmw_master_init(&dev_priv->fbdev_master);
273 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
274 dev_priv->active_master = &dev_priv->fbdev_master;
275
276
277 ret = ttm_bo_device_init(&dev_priv->bdev,
278 dev_priv->bo_global_ref.ref.object,
279 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
280 false);
281 if (unlikely(ret != 0)) {
282 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
283 goto out_err1;
284 }
285
286 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
287 (dev_priv->vram_size >> PAGE_SHIFT));
288 if (unlikely(ret != 0)) {
289 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
290 goto out_err2;
291 }
292
293 dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
294 dev_priv->mmio_size, DRM_MTRR_WC);
295
296 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
297 dev_priv->mmio_size);
298
299 if (unlikely(dev_priv->mmio_virt == NULL)) {
300 ret = -ENOMEM;
301 DRM_ERROR("Failed mapping MMIO.\n");
302 goto out_err3;
303 }
304
305 dev_priv->tdev = ttm_object_device_init
306 (dev_priv->mem_global_ref.object, 12);
307
308 if (unlikely(dev_priv->tdev == NULL)) {
309 DRM_ERROR("Unable to initialize TTM object management.\n");
310 ret = -ENOMEM;
311 goto out_err4;
312 }
313
314 dev->dev_private = dev_priv;
315
316 if (!dev->devname)
317 dev->devname = vmw_devname;
318
319 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
320 ret = drm_irq_install(dev);
321 if (unlikely(ret != 0)) {
322 DRM_ERROR("Failed installing irq: %d\n", ret);
323 goto out_no_irq;
324 }
325 }
326
327 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
328 dev_priv->stealth = (ret != 0);
329 if (dev_priv->stealth) {
330 /**
331 * Request at least the mmio PCI resource.
332 */
333
334 DRM_INFO("It appears that vesafb is loaded. "
335 "Ignore the above error if any. Entering stealth mode.\n");
336 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
337 if (unlikely(ret != 0)) {
338 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
339 goto out_no_device;
340 }
341 vmw_kms_init(dev_priv);
342 vmw_overlay_init(dev_priv);
343 } else {
344 ret = vmw_request_device(dev_priv);
345 if (unlikely(ret != 0))
346 goto out_no_device;
347 vmw_kms_init(dev_priv);
348 vmw_overlay_init(dev_priv);
349 vmw_fb_init(dev_priv);
350 }
351
352 return 0;
353
354out_no_device:
355 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
356 drm_irq_uninstall(dev_priv->dev);
357 if (dev->devname == vmw_devname)
358 dev->devname = NULL;
359out_no_irq:
360 ttm_object_device_release(&dev_priv->tdev);
361out_err4:
362 iounmap(dev_priv->mmio_virt);
363out_err3:
364 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
365 dev_priv->mmio_size, DRM_MTRR_WC);
366 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
367out_err2:
368 (void)ttm_bo_device_release(&dev_priv->bdev);
369out_err1:
370 vmw_ttm_global_release(dev_priv);
371out_err0:
372 ida_destroy(&dev_priv->gmr_ida);
373 idr_destroy(&dev_priv->surface_idr);
374 idr_destroy(&dev_priv->context_idr);
375 idr_destroy(&dev_priv->stream_idr);
376 kfree(dev_priv);
377 return ret;
378}
379
380static int vmw_driver_unload(struct drm_device *dev)
381{
382 struct vmw_private *dev_priv = vmw_priv(dev);
383
384 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
385
386 if (!dev_priv->stealth) {
387 vmw_fb_close(dev_priv);
388 vmw_kms_close(dev_priv);
389 vmw_overlay_close(dev_priv);
390 vmw_release_device(dev_priv);
391 pci_release_regions(dev->pdev);
392 } else {
393 vmw_kms_close(dev_priv);
394 vmw_overlay_close(dev_priv);
395 pci_release_region(dev->pdev, 2);
396 }
397 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
398 drm_irq_uninstall(dev_priv->dev);
399 if (dev->devname == vmw_devname)
400 dev->devname = NULL;
401 ttm_object_device_release(&dev_priv->tdev);
402 iounmap(dev_priv->mmio_virt);
403 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
404 dev_priv->mmio_size, DRM_MTRR_WC);
405 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
406 (void)ttm_bo_device_release(&dev_priv->bdev);
407 vmw_ttm_global_release(dev_priv);
408 ida_destroy(&dev_priv->gmr_ida);
409 idr_destroy(&dev_priv->surface_idr);
410 idr_destroy(&dev_priv->context_idr);
411 idr_destroy(&dev_priv->stream_idr);
412
413 kfree(dev_priv);
414
415 return 0;
416}
417
418static void vmw_postclose(struct drm_device *dev,
419 struct drm_file *file_priv)
420{
421 struct vmw_fpriv *vmw_fp;
422
423 vmw_fp = vmw_fpriv(file_priv);
424 ttm_object_file_release(&vmw_fp->tfile);
425 if (vmw_fp->locked_master)
426 drm_master_put(&vmw_fp->locked_master);
427 kfree(vmw_fp);
428}
429
430static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
431{
432 struct vmw_private *dev_priv = vmw_priv(dev);
433 struct vmw_fpriv *vmw_fp;
434 int ret = -ENOMEM;
435
436 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
437 if (unlikely(vmw_fp == NULL))
438 return ret;
439
440 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
441 if (unlikely(vmw_fp->tfile == NULL))
442 goto out_no_tfile;
443
444 file_priv->driver_priv = vmw_fp;
445
446 if (unlikely(dev_priv->bdev.dev_mapping == NULL))
447 dev_priv->bdev.dev_mapping =
448 file_priv->filp->f_path.dentry->d_inode->i_mapping;
449
450 return 0;
451
452out_no_tfile:
453 kfree(vmw_fp);
454 return ret;
455}
456
457static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
458 unsigned long arg)
459{
460 struct drm_file *file_priv = filp->private_data;
461 struct drm_device *dev = file_priv->minor->dev;
462 unsigned int nr = DRM_IOCTL_NR(cmd);
463 long ret;
464
465 /*
466 * The driver private ioctls and TTM ioctls should be
467 * thread-safe.
468 */
469
470 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
471 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
472 struct drm_ioctl_desc *ioctl =
473 &vmw_ioctls[nr - DRM_COMMAND_BASE];
474
475 if (unlikely(ioctl->cmd != cmd)) {
476 DRM_ERROR("Invalid command format, ioctl %d\n",
477 nr - DRM_COMMAND_BASE);
478 return -EINVAL;
479 }
480 return drm_ioctl(filp->f_path.dentry->d_inode,
481 filp, cmd, arg);
482 }
483
484 /*
485 * Not all old drm ioctls are thread-safe.
486 */
487
488 lock_kernel();
489 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
490 unlock_kernel();
491 return ret;
492}
493
494static int vmw_firstopen(struct drm_device *dev)
495{
496 struct vmw_private *dev_priv = vmw_priv(dev);
497 dev_priv->is_opened = true;
498
499 return 0;
500}
501
502static void vmw_lastclose(struct drm_device *dev)
503{
504 struct vmw_private *dev_priv = vmw_priv(dev);
505 struct drm_crtc *crtc;
506 struct drm_mode_set set;
507 int ret;
508
509 /**
510 * Do nothing on the lastclose call from drm_unload.
511 */
512
513 if (!dev_priv->is_opened)
514 return;
515
516 dev_priv->is_opened = false;
517 set.x = 0;
518 set.y = 0;
519 set.fb = NULL;
520 set.mode = NULL;
521 set.connectors = NULL;
522 set.num_connectors = 0;
523
524 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
525 set.crtc = crtc;
526 ret = crtc->funcs->set_config(&set);
527 WARN_ON(ret != 0);
528 }
529
530}
531
532static void vmw_master_init(struct vmw_master *vmaster)
533{
534 ttm_lock_init(&vmaster->lock);
535}
536
537static int vmw_master_create(struct drm_device *dev,
538 struct drm_master *master)
539{
540 struct vmw_master *vmaster;
541
542 DRM_INFO("Master create.\n");
543 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
544 if (unlikely(vmaster == NULL))
545 return -ENOMEM;
546
547 ttm_lock_init(&vmaster->lock);
548 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
549 master->driver_priv = vmaster;
550
551 return 0;
552}
553
554static void vmw_master_destroy(struct drm_device *dev,
555 struct drm_master *master)
556{
557 struct vmw_master *vmaster = vmw_master(master);
558
559 DRM_INFO("Master destroy.\n");
560 master->driver_priv = NULL;
561 kfree(vmaster);
562}
563
564
565static int vmw_master_set(struct drm_device *dev,
566 struct drm_file *file_priv,
567 bool from_open)
568{
569 struct vmw_private *dev_priv = vmw_priv(dev);
570 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
571 struct vmw_master *active = dev_priv->active_master;
572 struct vmw_master *vmaster = vmw_master(file_priv->master);
573 int ret = 0;
574
575 DRM_INFO("Master set.\n");
576 if (dev_priv->stealth) {
577 ret = vmw_request_device(dev_priv);
578 if (unlikely(ret != 0))
579 return ret;
580 }
581
582 if (active) {
583 BUG_ON(active != &dev_priv->fbdev_master);
584 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
585 if (unlikely(ret != 0))
586 goto out_no_active_lock;
587
588 ttm_lock_set_kill(&active->lock, true, SIGTERM);
589 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
590 if (unlikely(ret != 0)) {
591 DRM_ERROR("Unable to clean VRAM on "
592 "master drop.\n");
593 }
594
595 dev_priv->active_master = NULL;
596 }
597
598 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
599 if (!from_open) {
600 ttm_vt_unlock(&vmaster->lock);
601 BUG_ON(vmw_fp->locked_master != file_priv->master);
602 drm_master_put(&vmw_fp->locked_master);
603 }
604
605 dev_priv->active_master = vmaster;
606
607 return 0;
608
609out_no_active_lock:
610 vmw_release_device(dev_priv);
611 return ret;
612}
613
614static void vmw_master_drop(struct drm_device *dev,
615 struct drm_file *file_priv,
616 bool from_release)
617{
618 struct vmw_private *dev_priv = vmw_priv(dev);
619 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
620 struct vmw_master *vmaster = vmw_master(file_priv->master);
621 int ret;
622
623 DRM_INFO("Master drop.\n");
624
625 /**
626 * Make sure the master doesn't disappear while we have
627 * it locked.
628 */
629
630 vmw_fp->locked_master = drm_master_get(file_priv->master);
631 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
632
633 if (unlikely((ret != 0))) {
634 DRM_ERROR("Unable to lock TTM at VT switch.\n");
635 drm_master_put(&vmw_fp->locked_master);
636 }
637
638 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
639
640 if (dev_priv->stealth) {
641 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
642 if (unlikely(ret != 0))
643 DRM_ERROR("Unable to clean VRAM on master drop.\n");
644 vmw_release_device(dev_priv);
645 }
646 dev_priv->active_master = &dev_priv->fbdev_master;
647 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
648 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
649
650 if (!dev_priv->stealth)
651 vmw_fb_on(dev_priv);
652}
653
654
655static void vmw_remove(struct pci_dev *pdev)
656{
657 struct drm_device *dev = pci_get_drvdata(pdev);
658
659 drm_put_dev(dev);
660}
661
662static struct drm_driver driver = {
663 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
664 DRIVER_MODESET,
665 .load = vmw_driver_load,
666 .unload = vmw_driver_unload,
667 .firstopen = vmw_firstopen,
668 .lastclose = vmw_lastclose,
669 .irq_preinstall = vmw_irq_preinstall,
670 .irq_postinstall = vmw_irq_postinstall,
671 .irq_uninstall = vmw_irq_uninstall,
672 .irq_handler = vmw_irq_handler,
673 .reclaim_buffers_locked = NULL,
674 .get_map_ofs = drm_core_get_map_ofs,
675 .get_reg_ofs = drm_core_get_reg_ofs,
676 .ioctls = vmw_ioctls,
677 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
678 .dma_quiescent = NULL, /*vmw_dma_quiescent, */
679 .master_create = vmw_master_create,
680 .master_destroy = vmw_master_destroy,
681 .master_set = vmw_master_set,
682 .master_drop = vmw_master_drop,
683 .open = vmw_driver_open,
684 .postclose = vmw_postclose,
685 .fops = {
686 .owner = THIS_MODULE,
687 .open = drm_open,
688 .release = drm_release,
689 .unlocked_ioctl = vmw_unlocked_ioctl,
690 .mmap = vmw_mmap,
691 .poll = drm_poll,
692 .fasync = drm_fasync,
693#if defined(CONFIG_COMPAT)
694 .compat_ioctl = drm_compat_ioctl,
695#endif
696 },
697 .pci_driver = {
698 .name = VMWGFX_DRIVER_NAME,
699 .id_table = vmw_pci_id_list,
700 .probe = vmw_probe,
701 .remove = vmw_remove
702 },
703 .name = VMWGFX_DRIVER_NAME,
704 .desc = VMWGFX_DRIVER_DESC,
705 .date = VMWGFX_DRIVER_DATE,
706 .major = VMWGFX_DRIVER_MAJOR,
707 .minor = VMWGFX_DRIVER_MINOR,
708 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
709};
710
711static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
712{
713 return drm_get_dev(pdev, ent, &driver);
714}
715
716static int __init vmwgfx_init(void)
717{
718 int ret;
719 ret = drm_init(&driver);
720 if (ret)
721 DRM_ERROR("Failed initializing DRM.\n");
722 return ret;
723}
724
725static void __exit vmwgfx_exit(void)
726{
727 drm_exit(&driver);
728}
729
730module_init(vmwgfx_init);
731module_exit(vmwgfx_exit);
732
733MODULE_AUTHOR("VMware Inc. and others");
734MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
735MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 000000000000..43546d09d1b0
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,511 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef _VMWGFX_DRV_H_
29#define _VMWGFX_DRV_H_
30
31#include "vmwgfx_reg.h"
32#include "drmP.h"
33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h"
35#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h"
38#include "ttm/ttm_execbuf_util.h"
39#include "ttm/ttm_module.h"
40
41#define VMWGFX_DRIVER_DATE "20090724"
42#define VMWGFX_DRIVER_MAJOR 0
43#define VMWGFX_DRIVER_MINOR 1
44#define VMWGFX_DRIVER_PATCHLEVEL 2
45#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
46#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
47#define VMWGFX_MAX_RELOCATIONS 2048
48#define VMWGFX_MAX_GMRS 2048
49
50struct vmw_fpriv {
51 struct drm_master *locked_master;
52 struct ttm_object_file *tfile;
53};
54
55struct vmw_dma_buffer {
56 struct ttm_buffer_object base;
57 struct list_head validate_list;
58 struct list_head gmr_lru;
59 uint32_t gmr_id;
60 bool gmr_bound;
61 uint32_t cur_validate_node;
62 bool on_validate_list;
63};
64
65struct vmw_resource {
66 struct kref kref;
67 struct vmw_private *dev_priv;
68 struct idr *idr;
69 int id;
70 enum ttm_object_type res_type;
71 bool avail;
72 void (*hw_destroy) (struct vmw_resource *res);
73 void (*res_free) (struct vmw_resource *res);
74
75 /* TODO is a generic snooper needed? */
76#if 0
77 void (*snoop)(struct vmw_resource *res,
78 struct ttm_object_file *tfile,
79 SVGA3dCmdHeader *header);
80 void *snoop_priv;
81#endif
82};
83
84struct vmw_cursor_snooper {
85 struct drm_crtc *crtc;
86 size_t age;
87 uint32_t *image;
88};
89
90struct vmw_surface {
91 struct vmw_resource res;
92 uint32_t flags;
93 uint32_t format;
94 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
95 struct drm_vmw_size *sizes;
96 uint32_t num_sizes;
97
98 /* TODO so far just an extra pointer */
99 struct vmw_cursor_snooper snooper;
100};
101
102struct vmw_fifo_state {
103 unsigned long reserved_size;
104 __le32 *dynamic_buffer;
105 __le32 *static_buffer;
106 __le32 *last_buffer;
107 uint32_t last_data_size;
108 uint32_t last_buffer_size;
109 bool last_buffer_add;
110 unsigned long static_buffer_size;
111 bool using_bounce_buffer;
112 uint32_t capabilities;
113 struct rw_semaphore rwsem;
114};
115
116struct vmw_relocation {
117 SVGAGuestPtr *location;
118 uint32_t index;
119};
120
121struct vmw_sw_context {
122 struct ida bo_list;
123 uint32_t last_cid;
124 bool cid_valid;
125 uint32_t last_sid;
126 bool sid_valid;
127 struct ttm_object_file *tfile;
128 struct list_head validate_nodes;
129 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
130 uint32_t cur_reloc;
131 struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
132 uint32_t cur_val_buf;
133};
134
135struct vmw_legacy_display;
136struct vmw_overlay;
137
138struct vmw_master {
139 struct ttm_lock lock;
140};
141
142struct vmw_private {
143 struct ttm_bo_device bdev;
144 struct ttm_bo_global_ref bo_global_ref;
145 struct ttm_global_reference mem_global_ref;
146
147 struct vmw_fifo_state fifo;
148
149 struct drm_device *dev;
150 unsigned long vmw_chipset;
151 unsigned int io_start;
152 uint32_t vram_start;
153 uint32_t vram_size;
154 uint32_t mmio_start;
155 uint32_t mmio_size;
156 uint32_t fb_max_width;
157 uint32_t fb_max_height;
158 __le32 __iomem *mmio_virt;
159 int mmio_mtrr;
160 uint32_t capabilities;
161 uint32_t max_gmr_descriptors;
162 uint32_t max_gmr_ids;
163 struct mutex hw_mutex;
164
165 /*
166 * VGA registers.
167 */
168
169 uint32_t vga_width;
170 uint32_t vga_height;
171 uint32_t vga_depth;
172 uint32_t vga_bpp;
173 uint32_t vga_pseudo;
174 uint32_t vga_red_mask;
175 uint32_t vga_blue_mask;
176 uint32_t vga_green_mask;
177
178 /*
179 * Framebuffer info.
180 */
181
182 void *fb_info;
183 struct vmw_legacy_display *ldu_priv;
184 struct vmw_overlay *overlay_priv;
185
186 /*
187 * Context and surface management.
188 */
189
190 rwlock_t resource_lock;
191 struct idr context_idr;
192 struct idr surface_idr;
193 struct idr stream_idr;
194
195 /*
196 * Block lastclose from racing with firstopen.
197 */
198
199 struct mutex init_mutex;
200
201 /*
202 * A resource manager for kernel-only surfaces and
203 * contexts.
204 */
205
206 struct ttm_object_device *tdev;
207
208 /*
209 * Fencing and IRQs.
210 */
211
212 uint32_t fence_seq;
213 wait_queue_head_t fence_queue;
214 wait_queue_head_t fifo_queue;
215 atomic_t fence_queue_waiters;
216 atomic_t fifo_queue_waiters;
217 uint32_t last_read_sequence;
218 spinlock_t irq_lock;
219
220 /*
221 * Device state
222 */
223
224 uint32_t traces_state;
225 uint32_t enable_state;
226 uint32_t config_done_state;
227
228 /**
229 * Execbuf
230 */
231 /**
232 * Protected by the cmdbuf mutex.
233 */
234
235 struct vmw_sw_context ctx;
236 uint32_t val_seq;
237 struct mutex cmdbuf_mutex;
238
239 /**
240 * GMR management. Protected by the lru spinlock.
241 */
242
243 struct ida gmr_ida;
244 struct list_head gmr_lru;
245
246
247 /**
248 * Operating mode.
249 */
250
251 bool stealth;
252 bool is_opened;
253
254 /**
255 * Master management.
256 */
257
258 struct vmw_master *active_master;
259 struct vmw_master fbdev_master;
260};
261
262static inline struct vmw_private *vmw_priv(struct drm_device *dev)
263{
264 return (struct vmw_private *)dev->dev_private;
265}
266
267static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
268{
269 return (struct vmw_fpriv *)file_priv->driver_priv;
270}
271
272static inline struct vmw_master *vmw_master(struct drm_master *master)
273{
274 return (struct vmw_master *) master->driver_priv;
275}
276
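/*
 * Editor's note (not in the original patch): SVGA registers are accessed
 * through an index/value port pair -- write the register index to
 * VMWGFX_INDEX_PORT, then read or write VMWGFX_VALUE_PORT. The two-step
 * sequence is not atomic, which is why callers in this patch hold
 * dev_priv->hw_mutex around vmw_read()/vmw_write().
 */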
277static inline void vmw_write(struct vmw_private *dev_priv,
278 unsigned int offset, uint32_t value)
279{
280 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
281 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
282}
283
284static inline uint32_t vmw_read(struct vmw_private *dev_priv,
285 unsigned int offset)
286{
287 uint32_t val;
288
289 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
290 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
291 return val;
292}
293
294/**
295 * GMR utilities - vmwgfx_gmr.c
296 */
297
298extern int vmw_gmr_bind(struct vmw_private *dev_priv,
299 struct ttm_buffer_object *bo);
300extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
301
302/**
303 * Resource utilities - vmwgfx_resource.c
304 */
305
306extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
307extern void vmw_resource_unreference(struct vmw_resource **p_res);
308extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
309extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
310 struct drm_file *file_priv);
311extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
312 struct drm_file *file_priv);
313extern int vmw_context_check(struct vmw_private *dev_priv,
314 struct ttm_object_file *tfile,
315 int id);
316extern void vmw_surface_res_free(struct vmw_resource *res);
317extern int vmw_surface_init(struct vmw_private *dev_priv,
318 struct vmw_surface *srf,
319 void (*res_free) (struct vmw_resource *res));
320extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
321 struct ttm_object_file *tfile,
322 int sid, struct vmw_surface **out);
323extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
324 struct drm_file *file_priv);
325extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv);
327extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
328 struct drm_file *file_priv);
329extern int vmw_surface_check(struct vmw_private *dev_priv,
330 struct ttm_object_file *tfile,
331 int id);
332extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
333extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
334 struct vmw_dma_buffer *vmw_bo,
335 size_t size, struct ttm_placement *placement,
336 bool interruptible,
337 void (*bo_free) (struct ttm_buffer_object *bo));
338extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
339 struct drm_file *file_priv);
340extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
341 struct drm_file *file_priv);
342extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
343 uint32_t cur_validate_node);
344extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
345extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
346 uint32_t id, struct vmw_dma_buffer **out);
347extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
348extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
349extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
350extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
351 struct vmw_dma_buffer *bo);
352extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
353 struct vmw_dma_buffer *bo);
354extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
355 struct drm_file *file_priv);
356extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
357 struct drm_file *file_priv);
358extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
359 struct ttm_object_file *tfile,
360 uint32_t *inout_id,
361 struct vmw_resource **out);
362
363
364/**
365 * Misc Ioctl functionality - vmwgfx_ioctl.c
366 */
367
368extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
369 struct drm_file *file_priv);
370extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
371 struct drm_file *file_priv);
372
373/**
374 * Fifo utilities - vmwgfx_fifo.c
375 */
376
377extern int vmw_fifo_init(struct vmw_private *dev_priv,
378 struct vmw_fifo_state *fifo);
379extern void vmw_fifo_release(struct vmw_private *dev_priv,
380 struct vmw_fifo_state *fifo);
381extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
382extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
383extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
384 uint32_t *sequence);
385extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
386extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
387
388/**
389 * TTM glue - vmwgfx_ttm_glue.c
390 */
391
392extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
393extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
394extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
395
396/**
397 * TTM buffer object driver - vmwgfx_buffer.c
398 */
399
400extern struct ttm_placement vmw_vram_placement;
401extern struct ttm_placement vmw_vram_ne_placement;
402extern struct ttm_placement vmw_sys_placement;
403extern struct ttm_bo_driver vmw_bo_driver;
404extern int vmw_dma_quiescent(struct drm_device *dev);
405
406/**
407 * Command submission - vmwgfx_execbuf.c
408 */
409
410extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
411 struct drm_file *file_priv);
412
413/**
414 * IRQs and waiting - vmwgfx_irq.c
415 */
416
417extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
418extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
419 uint32_t sequence, bool interruptible,
420 unsigned long timeout);
421extern void vmw_irq_preinstall(struct drm_device *dev);
422extern int vmw_irq_postinstall(struct drm_device *dev);
423extern void vmw_irq_uninstall(struct drm_device *dev);
424extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
425 uint32_t sequence);
426extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
427 struct drm_file *file_priv);
428extern int vmw_fallback_wait(struct vmw_private *dev_priv,
429 bool lazy,
430 bool fifo_idle,
431 uint32_t sequence,
432 bool interruptible,
433 unsigned long timeout);
434
435/**
436 * Kernel framebuffer - vmwgfx_fb.c
437 */
438
439int vmw_fb_init(struct vmw_private *vmw_priv);
440int vmw_fb_close(struct vmw_private *dev_priv);
441int vmw_fb_off(struct vmw_private *vmw_priv);
442int vmw_fb_on(struct vmw_private *vmw_priv);
443
444/**
445 * Kernel modesetting - vmwgfx_kms.c
446 */
447
448int vmw_kms_init(struct vmw_private *dev_priv);
449int vmw_kms_close(struct vmw_private *dev_priv);
450int vmw_kms_save_vga(struct vmw_private *vmw_priv);
451int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
452int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
453 struct drm_file *file_priv);
454void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
455void vmw_kms_cursor_snoop(struct vmw_surface *srf,
456 struct ttm_object_file *tfile,
457 struct ttm_buffer_object *bo,
458 SVGA3dCmdHeader *header);
459
460/**
461 * Overlay control - vmwgfx_overlay.c
462 */
463
464int vmw_overlay_init(struct vmw_private *dev_priv);
465int vmw_overlay_close(struct vmw_private *dev_priv);
466int vmw_overlay_ioctl(struct drm_device *dev, void *data,
467 struct drm_file *file_priv);
468int vmw_overlay_stop_all(struct vmw_private *dev_priv);
469int vmw_overlay_resume_all(struct vmw_private *dev_priv);
470int vmw_overlay_pause_all(struct vmw_private *dev_priv);
471int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
472int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
473int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
474int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
475
476/**
477 * Inline helper functions
478 */
479
480static inline void vmw_surface_unreference(struct vmw_surface **srf)
481{
482 struct vmw_surface *tmp_srf = *srf;
483 struct vmw_resource *res = &tmp_srf->res;
484 *srf = NULL;
485
486 vmw_resource_unreference(&res);
487}
488
489static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
490{
491 (void) vmw_resource_reference(&srf->res);
492 return srf;
493}
494
495static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
496{
497 struct vmw_dma_buffer *tmp_buf = *buf;
498 struct ttm_buffer_object *bo = &tmp_buf->base;
499 *buf = NULL;
500
501 ttm_bo_unref(&bo);
502}
503
504static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
505{
506 if (ttm_bo_reference(&buf->base))
507 return buf;
508 return NULL;
509}
510
511#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 000000000000..7a39f3e6dc2c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,516 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include "ttm/ttm_bo_api.h"
31#include "ttm/ttm_placement.h"
32
33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header)
36{
37 return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
38}
39
40static int vmw_cmd_ok(struct vmw_private *dev_priv,
41 struct vmw_sw_context *sw_context,
42 SVGA3dCmdHeader *header)
43{
44 return 0;
45}
46
47static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
48 struct vmw_sw_context *sw_context,
49 SVGA3dCmdHeader *header)
50{
51 struct vmw_cid_cmd {
52 SVGA3dCmdHeader header;
53 __le32 cid;
54 } *cmd;
55 int ret;
56
57 cmd = container_of(header, struct vmw_cid_cmd, header);
58 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
59 return 0;
60
61 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
62 if (unlikely(ret != 0)) {
63 DRM_ERROR("Could not find or use context %u\n",
64 (unsigned) cmd->cid);
65 return ret;
66 }
67
68 sw_context->last_cid = cmd->cid;
69 sw_context->cid_valid = true;
70
71 return 0;
72}
73
74static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
75 struct vmw_sw_context *sw_context,
76 uint32_t sid)
77{
78 if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
79 sid != SVGA3D_INVALID_ID)) {
80 int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
81
82 if (unlikely(ret != 0)) {
83 DRM_ERROR("Could ot find or use surface %u\n",
84 (unsigned) sid);
85 return ret;
86 }
87
88 sw_context->last_sid = sid;
89 sw_context->sid_valid = true;
90 }
91 return 0;
92}
93
94
95static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
96 struct vmw_sw_context *sw_context,
97 SVGA3dCmdHeader *header)
98{
99 struct vmw_sid_cmd {
100 SVGA3dCmdHeader header;
101 SVGA3dCmdSetRenderTarget body;
102 } *cmd;
103 int ret;
104
105 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
106 if (unlikely(ret != 0))
107 return ret;
108
109 cmd = container_of(header, struct vmw_sid_cmd, header);
110 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
111}
112
113static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
114 struct vmw_sw_context *sw_context,
115 SVGA3dCmdHeader *header)
116{
117 struct vmw_sid_cmd {
118 SVGA3dCmdHeader header;
119 SVGA3dCmdSurfaceCopy body;
120 } *cmd;
121 int ret;
122
123 cmd = container_of(header, struct vmw_sid_cmd, header);
124 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
125 if (unlikely(ret != 0))
126 return ret;
127 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
128}
129
130static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
131 struct vmw_sw_context *sw_context,
132 SVGA3dCmdHeader *header)
133{
134 struct vmw_sid_cmd {
135 SVGA3dCmdHeader header;
136 SVGA3dCmdSurfaceStretchBlt body;
137 } *cmd;
138 int ret;
139
140 cmd = container_of(header, struct vmw_sid_cmd, header);
141 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
142 if (unlikely(ret != 0))
143 return ret;
144 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
145}
146
147static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
148 struct vmw_sw_context *sw_context,
149 SVGA3dCmdHeader *header)
150{
151 struct vmw_sid_cmd {
152 SVGA3dCmdHeader header;
153 SVGA3dCmdBlitSurfaceToScreen body;
154 } *cmd;
155
156 cmd = container_of(header, struct vmw_sid_cmd, header);
157 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
158}
159
160static int vmw_cmd_present_check(struct vmw_private *dev_priv,
161 struct vmw_sw_context *sw_context,
162 SVGA3dCmdHeader *header)
163{
164 struct vmw_sid_cmd {
165 SVGA3dCmdHeader header;
166 SVGA3dCmdPresent body;
167 } *cmd;
168
169 cmd = container_of(header, struct vmw_sid_cmd, header);
170 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
171}
172
173static int vmw_cmd_dma(struct vmw_private *dev_priv,
174 struct vmw_sw_context *sw_context,
175 SVGA3dCmdHeader *header)
176{
177 uint32_t handle;
178 struct vmw_dma_buffer *vmw_bo = NULL;
179 struct ttm_buffer_object *bo;
180 struct vmw_surface *srf = NULL;
181 struct vmw_dma_cmd {
182 SVGA3dCmdHeader header;
183 SVGA3dCmdSurfaceDMA dma;
184 } *cmd;
185 struct vmw_relocation *reloc;
186 int ret;
187 uint32_t cur_validate_node;
188 struct ttm_validate_buffer *val_buf;
189
190
191 cmd = container_of(header, struct vmw_dma_cmd, header);
192 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
193 if (unlikely(ret != 0))
194 return ret;
195
196 handle = cmd->dma.guest.ptr.gmrId;
197 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
198 if (unlikely(ret != 0)) {
199 DRM_ERROR("Could not find or use GMR region.\n");
200 return -EINVAL;
201 }
202 bo = &vmw_bo->base;
203
204 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
205 DRM_ERROR("Max number of DMA commands per submission"
206 " exceeded\n");
207 ret = -EINVAL;
208 goto out_no_reloc;
209 }
210
211 reloc = &sw_context->relocs[sw_context->cur_reloc++];
212 reloc->location = &cmd->dma.guest.ptr;
213
214 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
215 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
216 DRM_ERROR("Max number of DMA buffers per submission"
217 " exceeded.\n");
218 ret = -EINVAL;
219 goto out_no_reloc;
220 }
221
222 reloc->index = cur_validate_node;
223 if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
224 val_buf = &sw_context->val_bufs[cur_validate_node];
225 val_buf->bo = ttm_bo_reference(bo);
226 val_buf->new_sync_obj_arg = (void *) dev_priv;
227 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
228 ++sw_context->cur_val_buf;
229 }
230
231 ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
232 cmd->dma.host.sid, &srf);
233 if (ret) {
234 DRM_ERROR("could not find surface\n");
235 goto out_no_reloc;
236 }
237
238 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
239 vmw_surface_unreference(&srf);
240
241out_no_reloc:
242 vmw_dmabuf_unreference(&vmw_bo);
243 return ret;
244}
245
246
247typedef int (*vmw_cmd_func) (struct vmw_private *,
248 struct vmw_sw_context *,
249 SVGA3dCmdHeader *);
250
251#define VMW_CMD_DEF(cmd, func) \
252 [cmd - SVGA_3D_CMD_BASE] = func
253
254static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
255 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
256 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
257 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
258 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
259 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
260 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
261 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
262 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
263 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
264 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
265 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
266 &vmw_cmd_set_render_target_check),
267 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
268 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
269 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
270 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
271 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
272 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
273 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
274 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
275 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
276 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
277 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
278 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
279 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
280 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
281 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
282 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
283 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
284 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
285 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
286 &vmw_cmd_blt_surf_screen_check)
287};
288
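
The table above is a C99 designated-initializer jump table keyed by (cmd - SVGA_3D_CMD_BASE), so the SVGA command enum does not need to start at zero. A minimal standalone sketch of the same pattern; all names here are illustrative, not from the driver:

#include <stddef.h>

enum { CMD_BASE = 1040, CMD_COPY = 1040, CMD_BLIT = 1041, CMD_MAX = 1042 };

typedef int (*cmd_func)(void);

static int do_copy(void) { return 0; }
static int do_blit(void) { return 0; }

#define CMD_DEF(cmd, func) [cmd - CMD_BASE] = func

static cmd_func cmd_funcs[CMD_MAX - CMD_BASE] = {
	CMD_DEF(CMD_COPY, &do_copy),
	CMD_DEF(CMD_BLIT, &do_blit),
};

static int dispatch(unsigned int id)
{
	id -= CMD_BASE;				/* rebase onto the table */
	if (id >= CMD_MAX - CMD_BASE || cmd_funcs[id] == NULL)
		return -1;			/* unknown command */
	return cmd_funcs[id]();
}

int main(void) { return dispatch(CMD_COPY); }

Bounds-checking the rebased index before the indirect call is exactly what vmw_cmd_check does below.
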
289static int vmw_cmd_check(struct vmw_private *dev_priv,
290 struct vmw_sw_context *sw_context,
291 void *buf, uint32_t *size)
292{
293 uint32_t cmd_id;
294 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
295 int ret;
296
297 cmd_id = ((uint32_t *)buf)[0];
298 if (cmd_id == SVGA_CMD_UPDATE) {
299 *size = 5 << 2;
300 return 0;
301 }
302
303 cmd_id = le32_to_cpu(header->id);
304 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
305
306 cmd_id -= SVGA_3D_CMD_BASE;
307 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
308 goto out_err;
309
310 ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
311 if (unlikely(ret != 0))
312 goto out_err;
313
314 return 0;
315out_err:
316 DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
317 cmd_id + SVGA_3D_CMD_BASE);
318 return -EINVAL;
319}
320
321static int vmw_cmd_check_all(struct vmw_private *dev_priv,
322 struct vmw_sw_context *sw_context,
323 void *buf, uint32_t size)
324{
325 int32_t cur_size = size;
326 int ret;
327
328 while (cur_size > 0) {
329 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
330 if (unlikely(ret != 0))
331 return ret;
332 buf = (void *)((unsigned long) buf + size);
333 cur_size -= size;
334 }
335
336 if (unlikely(cur_size != 0)) {
337 DRM_ERROR("Command verifier out of sync.\n");
338 return -EINVAL;
339 }
340
341 return 0;
342}
343
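
vmw_cmd_check_all walks a packed stream in which each 3D command is an SVGA3dCmdHeader (id, size) followed by size bytes of payload, and treats any leftover bytes as corruption. A hedged userspace sketch of the same walk, with an extra guard against a payload overrunning the buffer:

#include <stdint.h>
#include <string.h>

struct hdr {				/* mirrors SVGA3dCmdHeader: id, size */
	uint32_t id;
	uint32_t size;
};

/* Return 0 iff the stream is consumed exactly; a truncated header or an
 * oversized payload is the "verifier out of sync" case. */
static int check_all(const void *buf, uint32_t total)
{
	while (total > 0) {
		struct hdr h;

		if (total < sizeof(h))
			return -1;		/* truncated header */
		memcpy(&h, buf, sizeof(h));
		if (h.size > total - sizeof(h))
			return -1;		/* payload overruns buffer */
		buf = (const char *)buf + sizeof(h) + h.size;
		total -= sizeof(h) + h.size;
	}
	return 0;
}
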
344static void vmw_free_relocations(struct vmw_sw_context *sw_context)
345{
346 sw_context->cur_reloc = 0;
347}
348
349static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
350{
351 uint32_t i;
352 struct vmw_relocation *reloc;
353 struct ttm_validate_buffer *validate;
354 struct ttm_buffer_object *bo;
355
356 for (i = 0; i < sw_context->cur_reloc; ++i) {
357 reloc = &sw_context->relocs[i];
358 validate = &sw_context->val_bufs[reloc->index];
359 bo = validate->bo;
360 reloc->location->offset += bo->offset;
361 reloc->location->gmrId = vmw_dmabuf_gmr(bo);
362 }
363 vmw_free_relocations(sw_context);
364}
365
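
Each relocation pairs a pointer into the copied command stream with an index into the array of validated buffers; once validation has placed every buffer, the recorded locations are patched in place. A simplified sketch, with illustrative struct layouts:

#include <stdint.h>

struct guest_ptr { uint32_t gmr_id; uint32_t offset; };	/* illustrative */

struct reloc {
	struct guest_ptr *location;	/* points into the command stream */
	uint32_t index;			/* which validated buffer it names */
};

struct buf { uint32_t gmr_id; uint32_t offset; };	/* final placement */

/* After validation assigns each buffer its final gmr_id/offset, patch
 * every recorded location in place, as vmw_apply_relocations does. */
static void apply_relocs(struct reloc *relocs, unsigned n,
			 const struct buf *bufs)
{
	for (unsigned i = 0; i < n; ++i) {
		const struct buf *b = &bufs[relocs[i].index];

		relocs[i].location->offset += b->offset;
		relocs[i].location->gmr_id = b->gmr_id;
	}
}
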
366static void vmw_clear_validations(struct vmw_sw_context *sw_context)
367{
368 struct ttm_validate_buffer *entry, *next;
369
370 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
371 head) {
372 list_del(&entry->head);
373 vmw_dmabuf_validate_clear(entry->bo);
374 ttm_bo_unref(&entry->bo);
375 sw_context->cur_val_buf--;
376 }
377 BUG_ON(sw_context->cur_val_buf != 0);
378}
379
380static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
381 struct ttm_buffer_object *bo)
382{
383 int ret;
384
385 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
386 return 0;
387
388 ret = vmw_gmr_bind(dev_priv, bo);
389 if (likely(ret == 0 || ret == -ERESTART))
390 return ret;
391
392
393 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
394 return ret;
395}
396
397
398static int vmw_validate_buffers(struct vmw_private *dev_priv,
399 struct vmw_sw_context *sw_context)
400{
401 struct ttm_validate_buffer *entry;
402 int ret;
403
404 list_for_each_entry(entry, &sw_context->validate_nodes, head) {
405 ret = vmw_validate_single_buffer(dev_priv, entry->bo);
406 if (unlikely(ret != 0))
407 return ret;
408 }
409 return 0;
410}
411
412int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
413 struct drm_file *file_priv)
414{
415 struct vmw_private *dev_priv = vmw_priv(dev);
416 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
417 struct drm_vmw_fence_rep fence_rep;
418 struct drm_vmw_fence_rep __user *user_fence_rep;
419 int ret;
420 void *user_cmd;
421 void *cmd;
422 uint32_t sequence;
423 struct vmw_sw_context *sw_context = &dev_priv->ctx;
424 struct vmw_master *vmaster = vmw_master(file_priv->master);
425
426 ret = ttm_read_lock(&vmaster->lock, true);
427 if (unlikely(ret != 0))
428 return ret;
429
430 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
431 if (unlikely(ret != 0)) {
432 ret = -ERESTART;
433 goto out_no_cmd_mutex;
434 }
435
436 cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
437 if (unlikely(cmd == NULL)) {
438 DRM_ERROR("Failed reserving fifo space for commands.\n");
439 ret = -ENOMEM;
440 goto out_unlock;
441 }
442
443 user_cmd = (void __user *)(unsigned long)arg->commands;
444 ret = copy_from_user(cmd, user_cmd, arg->command_size);
445
446	if (unlikely(ret != 0)) {
447		DRM_ERROR("Failed copying commands.\n");
448		ret = -EFAULT;	/* copy_from_user returns bytes not copied */
		goto out_commit;
449	}
450
451 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
452 sw_context->cid_valid = false;
453 sw_context->sid_valid = false;
454 sw_context->cur_reloc = 0;
455 sw_context->cur_val_buf = 0;
456
457 INIT_LIST_HEAD(&sw_context->validate_nodes);
458
459 ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
460 if (unlikely(ret != 0))
461 goto out_err;
462 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
463 dev_priv->val_seq++);
464 if (unlikely(ret != 0))
465 goto out_err;
466
467 ret = vmw_validate_buffers(dev_priv, sw_context);
468 if (unlikely(ret != 0))
469 goto out_err;
470
471 vmw_apply_relocations(sw_context);
472 vmw_fifo_commit(dev_priv, arg->command_size);
473
474 ret = vmw_fifo_send_fence(dev_priv, &sequence);
475
476 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
477 (void *)(unsigned long) sequence);
478 vmw_clear_validations(sw_context);
479 mutex_unlock(&dev_priv->cmdbuf_mutex);
480
481 /*
482 * This error is harmless, because if fence submission fails,
483 * vmw_fifo_send_fence will sync.
484 */
485
486 if (ret != 0)
487 DRM_ERROR("Fence submission error. Syncing.\n");
488
489 fence_rep.error = ret;
490 fence_rep.fence_seq = (uint64_t) sequence;
491
492 user_fence_rep = (struct drm_vmw_fence_rep __user *)
493 (unsigned long)arg->fence_rep;
494
495 /*
496 * copy_to_user errors will be detected by user space not
497 * seeing fence_rep::error filled in.
498 */
499
500 ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
501
502 vmw_kms_cursor_post_execbuf(dev_priv);
503 ttm_read_unlock(&vmaster->lock);
504 return 0;
505out_err:
506 vmw_free_relocations(sw_context);
507 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
508 vmw_clear_validations(sw_context);
509out_commit:
510 vmw_fifo_commit(dev_priv, 0);
511out_unlock:
512 mutex_unlock(&dev_priv->cmdbuf_mutex);
513out_no_cmd_mutex:
514 ttm_read_unlock(&vmaster->lock);
515 return ret;
516}
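
From user space, the ioctl takes a raw command buffer and hands back the fence sequence through the user-supplied fence_rep pointer. A rough sketch of a caller, assuming libdrm's drmCommandWrite() and the drm_vmw_execbuf_arg / drm_vmw_fence_rep definitions from vmwgfx_drm.h; treat the exact field layout here as an approximation:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWrite() from libdrm */
#include "vmwgfx_drm.h"		/* DRM_VMW_EXECBUF and the arg structs */

/* Submit a raw command buffer and read back the fence sequence.
 * fence_rep.error is pre-set to a sentinel so the caller can detect a
 * failed copy_to_user, matching the comment in the ioctl above. */
static int submit(int fd, void *cmds, uint32_t size, uint64_t *seq)
{
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg;

	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -1;			/* sentinel: not filled in */

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uintptr_t)cmds;
	arg.command_size = size;
	arg.fence_rep = (uintptr_t)&fence_rep;

	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) != 0 ||
	    fence_rep.error != 0)
		return -1;
	*seq = fence_rep.fence_seq;
	return 0;
}
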
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 000000000000..641dde76ada1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,742 @@
1/**************************************************************************
2 *
3 * Copyright © 2007 David Airlie
4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#define VMW_DIRTY_DELAY (HZ / 30)
35
36struct vmw_fb_par {
37 struct vmw_private *vmw_priv;
38
39 void *vmalloc;
40
41 struct vmw_dma_buffer *vmw_bo;
42 struct ttm_bo_kmap_obj map;
43
44 u32 pseudo_palette[17];
45
46 unsigned depth;
47 unsigned bpp;
48
49 unsigned max_width;
50 unsigned max_height;
51
52 void *bo_ptr;
53 unsigned bo_size;
54 bool bo_iowrite;
55
56 struct {
57 spinlock_t lock;
58 bool active;
59 unsigned x1;
60 unsigned y1;
61 unsigned x2;
62 unsigned y2;
63 } dirty;
64};
65
66static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
67 unsigned blue, unsigned transp,
68 struct fb_info *info)
69{
70 struct vmw_fb_par *par = info->par;
71 u32 *pal = par->pseudo_palette;
72
73 if (regno > 15) {
74 DRM_ERROR("Bad regno %u.\n", regno);
75 return 1;
76 }
77
78 switch (par->depth) {
79 case 24:
80 case 32:
81 pal[regno] = ((red & 0xff00) << 8) |
82 (green & 0xff00) |
83 ((blue & 0xff00) >> 8);
84 break;
85 default:
86 DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
87 return 1;
88 }
89
90 return 0;
91}
92
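
The pseudo-palette packs the high byte of each 16-bit fbdev color component into an XRGB8888 word. A small worked example of the packing used above:

#include <stdint.h>
#include <assert.h>

/* fbdev hands setcolreg 16-bit components; only the top byte of each is
 * kept, packed as 0x00RRGGBB for the 24/32-bit truecolor modes. */
static uint32_t pack_xrgb8888(uint16_t red, uint16_t green, uint16_t blue)
{
	return ((red & 0xff00) << 8) | (green & 0xff00) | ((blue & 0xff00) >> 8);
}

int main(void)
{
	assert(pack_xrgb8888(0xffff, 0x0000, 0x0000) == 0x00ff0000);
	assert(pack_xrgb8888(0x1234, 0x5678, 0x9abc) == 0x0012569a);
	return 0;
}
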
93static int vmw_fb_check_var(struct fb_var_screeninfo *var,
94 struct fb_info *info)
95{
96 int depth = var->bits_per_pixel;
97 struct vmw_fb_par *par = info->par;
98 struct vmw_private *vmw_priv = par->vmw_priv;
99
100 switch (var->bits_per_pixel) {
101 case 32:
102 depth = (var->transp.length > 0) ? 32 : 24;
103 break;
104 default:
105 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
106 return -EINVAL;
107 }
108
109 switch (depth) {
110 case 24:
111 var->red.offset = 16;
112 var->green.offset = 8;
113 var->blue.offset = 0;
114 var->red.length = 8;
115 var->green.length = 8;
116 var->blue.length = 8;
117 var->transp.length = 0;
118 var->transp.offset = 0;
119 break;
120 case 32:
121 var->red.offset = 16;
122 var->green.offset = 8;
123 var->blue.offset = 0;
124 var->red.length = 8;
125 var->green.length = 8;
126 var->blue.length = 8;
127 var->transp.length = 8;
128 var->transp.offset = 24;
129 break;
130 default:
131 DRM_ERROR("Bad depth %u.\n", depth);
132 return -EINVAL;
133 }
134
135	/* Without multimon it's hard to resize. */
136 if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
137 (var->xres != par->max_width ||
138 var->yres != par->max_height)) {
139 DRM_ERROR("Tried to resize, but we don't have multimon\n");
140 return -EINVAL;
141 }
142
143 if (var->xres > par->max_width ||
144 var->yres > par->max_height) {
145		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
146 return -EINVAL;
147 }
148
149 return 0;
150}
151
152static int vmw_fb_set_par(struct fb_info *info)
153{
154 struct vmw_fb_par *par = info->par;
155 struct vmw_private *vmw_priv = par->vmw_priv;
156
157 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
158 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
159 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
160 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
161 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
162 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
163 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
164 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
166
167 vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
168 vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
169 vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
170 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
171 vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
172 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
173 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
174 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
175
176		/* TODO: check whether pitch and offset change. */
177
178 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
183 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
184 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
185 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
186 } else {
187 vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
188 vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
189
190		/* TODO: check whether pitch and offset change. */
191 }
192
193 return 0;
194}
195
196static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
197 struct fb_info *info)
198{
199 return 0;
200}
201
202static int vmw_fb_blank(int blank, struct fb_info *info)
203{
204 return 0;
205}
206
207/*
208 * Dirty code
209 */
210
211static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
212{
213 struct vmw_private *vmw_priv = par->vmw_priv;
214 struct fb_info *info = vmw_priv->fb_info;
215 int stride = (info->fix.line_length / 4);
216 int *src = (int *)info->screen_base;
217 __le32 __iomem *vram_mem = par->bo_ptr;
218 unsigned long flags;
219 unsigned x, y, w, h;
220 int i, k;
221 struct {
222 uint32_t header;
223 SVGAFifoCmdUpdate body;
224 } *cmd;
225
226 spin_lock_irqsave(&par->dirty.lock, flags);
227 if (!par->dirty.active) {
228 spin_unlock_irqrestore(&par->dirty.lock, flags);
229 return;
230 }
231 x = par->dirty.x1;
232 y = par->dirty.y1;
233 w = min(par->dirty.x2, info->var.xres) - x;
234 h = min(par->dirty.y2, info->var.yres) - y;
235 par->dirty.x1 = par->dirty.x2 = 0;
236 par->dirty.y1 = par->dirty.y2 = 0;
237 spin_unlock_irqrestore(&par->dirty.lock, flags);
238
239 for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
240 for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
241 iowrite32(src[k], vram_mem + k);
242 }
243
244#if 0
245 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
246#endif
247
248 cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
249 if (unlikely(cmd == NULL)) {
250 DRM_ERROR("Fifo reserve failed.\n");
251 return;
252 }
253
254 cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
255 cmd->body.x = cpu_to_le32(x);
256 cmd->body.y = cpu_to_le32(y);
257 cmd->body.width = cpu_to_le32(w);
258 cmd->body.height = cpu_to_le32(h);
259 vmw_fifo_commit(vmw_priv, sizeof(*cmd));
260}
261
262static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
263 unsigned x1, unsigned y1,
264 unsigned width, unsigned height)
265{
266 struct fb_info *info = par->vmw_priv->fb_info;
267 unsigned long flags;
268 unsigned x2 = x1 + width;
269 unsigned y2 = y1 + height;
270
271 spin_lock_irqsave(&par->dirty.lock, flags);
272 if (par->dirty.x1 == par->dirty.x2) {
273 par->dirty.x1 = x1;
274 par->dirty.y1 = y1;
275 par->dirty.x2 = x2;
276 par->dirty.y2 = y2;
277		/* If we are active, start the dirty work;
278		 * we share the work with the defio system. */
279 if (par->dirty.active)
280 schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
281 } else {
282 if (x1 < par->dirty.x1)
283 par->dirty.x1 = x1;
284 if (y1 < par->dirty.y1)
285 par->dirty.y1 = y1;
286 if (x2 > par->dirty.x2)
287 par->dirty.x2 = x2;
288 if (y2 > par->dirty.y2)
289 par->dirty.y2 = y2;
290 }
291 spin_unlock_irqrestore(&par->dirty.lock, flags);
292}
293
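
Dirty tracking keeps a single bounding rectangle, with x1 == x2 encoding the empty region; the first mark initializes it and later marks grow the union. The same logic as a standalone sketch:

/* One bounding box accumulates all damage; x1 == x2 encodes "empty",
 * which is why the first mark initializes rather than unions. */
struct dirty_rect { unsigned x1, y1, x2, y2; };

static void mark(struct dirty_rect *d, unsigned x1, unsigned y1,
		 unsigned w, unsigned h)
{
	unsigned x2 = x1 + w, y2 = y1 + h;

	if (d->x1 == d->x2) {		/* empty: take the rect as-is */
		d->x1 = x1; d->y1 = y1;
		d->x2 = x2; d->y2 = y2;
		return;
	}
	if (x1 < d->x1) d->x1 = x1;	/* otherwise grow the union */
	if (y1 < d->y1) d->y1 = y1;
	if (x2 > d->x2) d->x2 = x2;
	if (y2 > d->y2) d->y2 = y2;
}
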
294static void vmw_deferred_io(struct fb_info *info,
295 struct list_head *pagelist)
296{
297 struct vmw_fb_par *par = info->par;
298 unsigned long start, end, min, max;
299 unsigned long flags;
300 struct page *page;
301 int y1, y2;
302
303 min = ULONG_MAX;
304 max = 0;
305 list_for_each_entry(page, pagelist, lru) {
306 start = page->index << PAGE_SHIFT;
307 end = start + PAGE_SIZE - 1;
308 min = min(min, start);
309 max = max(max, end);
310 }
311
312 if (min < max) {
313 y1 = min / info->fix.line_length;
314 y2 = (max / info->fix.line_length) + 1;
315
316 spin_lock_irqsave(&par->dirty.lock, flags);
317 par->dirty.x1 = 0;
318 par->dirty.y1 = y1;
319 par->dirty.x2 = info->var.xres;
320 par->dirty.y2 = y2;
321 spin_unlock_irqrestore(&par->dirty.lock, flags);
322 }
323
324 vmw_fb_dirty_flush(par);
325};
326
327struct fb_deferred_io vmw_defio = {
328 .delay = VMW_DIRTY_DELAY,
329 .deferred_io = vmw_deferred_io,
330};
331
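
Deferred I/O reports the touched pages of the shadow buffer, which vmw_deferred_io above converts into a span of whole scanlines by dividing byte offsets by the line length. A worked example with concrete numbers, assuming a 4 KiB page size:

#include <stdio.h>

int main(void)
{
	unsigned long line_length = 3200;	/* e.g. 800 px at 4 bytes/px */
	unsigned long min = 1UL << 12;		/* page index 1 << PAGE_SHIFT */
	unsigned long max = min + 4096 - 1;	/* last byte of that page */

	int y1 = min / line_length;		/* 4096 / 3200 = row 1 */
	int y2 = (max / line_length) + 1;	/* 8191 / 3200 + 1 = row 3 */

	printf("rows %d..%d dirty\n", y1, y2);
	return 0;
}
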
332/*
333 * Draw code
334 */
335
336static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
337{
338 cfb_fillrect(info, rect);
339 vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
340 rect->width, rect->height);
341}
342
343static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
344{
345 cfb_copyarea(info, region);
346 vmw_fb_dirty_mark(info->par, region->dx, region->dy,
347 region->width, region->height);
348}
349
350static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
351{
352 cfb_imageblit(info, image);
353 vmw_fb_dirty_mark(info->par, image->dx, image->dy,
354 image->width, image->height);
355}
356
357/*
358 * Bring up code
359 */
360
361static struct fb_ops vmw_fb_ops = {
362 .owner = THIS_MODULE,
363 .fb_check_var = vmw_fb_check_var,
364 .fb_set_par = vmw_fb_set_par,
365 .fb_setcolreg = vmw_fb_setcolreg,
366 .fb_fillrect = vmw_fb_fillrect,
367 .fb_copyarea = vmw_fb_copyarea,
368 .fb_imageblit = vmw_fb_imageblit,
369 .fb_pan_display = vmw_fb_pan_display,
370 .fb_blank = vmw_fb_blank,
371};
372
373static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out)
375{
376 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_placement ne_placement = vmw_vram_ne_placement;
378 int ret;
379
380 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
381
382	/* interruptible? */
383 ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
384 if (unlikely(ret != 0))
385 return ret;
386
387	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
388	if (!vmw_bo) {
		ret = -ENOMEM;	/* don't silently return 0 on allocation failure */
389		goto err_unlock;
	}
390
391 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
392 &ne_placement,
393 false,
394 &vmw_dmabuf_bo_free);
395 if (unlikely(ret != 0))
396 goto err_unlock; /* init frees the buffer on failure */
397
398 *out = vmw_bo;
399
400 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
401
402 return 0;
403
404err_unlock:
405 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
406 return ret;
407}
408
409int vmw_fb_init(struct vmw_private *vmw_priv)
410{
411 struct device *device = &vmw_priv->dev->pdev->dev;
412 struct vmw_fb_par *par;
413 struct fb_info *info;
414 unsigned initial_width, initial_height;
415 unsigned fb_width, fb_height;
416	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
417 int ret;
418
419 initial_width = 800;
420 initial_height = 600;
421
422	fb_bpp = 32;
423 fb_depth = 24;
424
425 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
426 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
427 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
428 } else {
429 fb_width = min(vmw_priv->fb_max_width, initial_width);
430 fb_height = min(vmw_priv->fb_max_height, initial_height);
431 }
432
433 initial_width = min(fb_width, initial_width);
434 initial_height = min(fb_height, initial_height);
435
436 vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
437 vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
438	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
439 vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
440 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
441 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
442 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
443
444 fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
445 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
446 fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
447
448 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
449 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
450 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
451 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
452 DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
453 DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
454 DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
455 DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
456 DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
457 DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
458 DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
459 DRM_DEBUG("fb_pitch %u\n", fb_pitch);
460 DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
461
462 info = framebuffer_alloc(sizeof(*par), device);
463 if (!info)
464 return -ENOMEM;
465
466 /*
467 * Par
468 */
469 vmw_priv->fb_info = info;
470 par = info->par;
471 par->vmw_priv = vmw_priv;
472 par->depth = fb_depth;
473	par->bpp = fb_bpp;
474 par->vmalloc = NULL;
475 par->max_width = fb_width;
476 par->max_height = fb_height;
477
478 /*
479 * Create buffers and alloc memory
480 */
481 par->vmalloc = vmalloc(fb_size);
482 if (unlikely(par->vmalloc == NULL)) {
483 ret = -ENOMEM;
484 goto err_free;
485 }
486
487 ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
488 if (unlikely(ret != 0))
489 goto err_free;
490
491 ret = ttm_bo_kmap(&par->vmw_bo->base,
492 0,
493 par->vmw_bo->base.num_pages,
494 &par->map);
495 if (unlikely(ret != 0))
496 goto err_unref;
497 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
498 par->bo_size = fb_size;
499
500 /*
501 * Fixed and var
502 */
503 strcpy(info->fix.id, "svgadrmfb");
504 info->fix.type = FB_TYPE_PACKED_PIXELS;
505 info->fix.visual = FB_VISUAL_TRUECOLOR;
506 info->fix.type_aux = 0;
507 info->fix.xpanstep = 1; /* doing it in hw */
508 info->fix.ypanstep = 1; /* doing it in hw */
509 info->fix.ywrapstep = 0;
510 info->fix.accel = FB_ACCEL_NONE;
511 info->fix.line_length = fb_pitch;
512
513 info->fix.smem_start = 0;
514 info->fix.smem_len = fb_size;
515
516 info->fix.mmio_start = 0;
517 info->fix.mmio_len = 0;
518
519 info->pseudo_palette = par->pseudo_palette;
520 info->screen_base = par->vmalloc;
521 info->screen_size = fb_size;
522
523 info->flags = FBINFO_DEFAULT;
524 info->fbops = &vmw_fb_ops;
525
526	/* 24-bit depth by default */
527 info->var.red.offset = 16;
528 info->var.green.offset = 8;
529 info->var.blue.offset = 0;
530 info->var.red.length = 8;
531 info->var.green.length = 8;
532 info->var.blue.length = 8;
533 info->var.transp.offset = 0;
534 info->var.transp.length = 0;
535
536 info->var.xres_virtual = fb_width;
537 info->var.yres_virtual = fb_height;
538 info->var.bits_per_pixel = par->bpp;
539 info->var.xoffset = 0;
540 info->var.yoffset = 0;
541 info->var.activate = FB_ACTIVATE_NOW;
542 info->var.height = -1;
543 info->var.width = -1;
544
545 info->var.xres = initial_width;
546 info->var.yres = initial_height;
547
548#if 0
549 info->pixmap.size = 64*1024;
550 info->pixmap.buf_align = 8;
551 info->pixmap.access_align = 32;
552 info->pixmap.flags = FB_PIXMAP_SYSTEM;
553 info->pixmap.scan_align = 1;
554#else
555 info->pixmap.size = 0;
556 info->pixmap.buf_align = 8;
557 info->pixmap.access_align = 32;
558 info->pixmap.flags = FB_PIXMAP_SYSTEM;
559 info->pixmap.scan_align = 1;
560#endif
561
562 /*
563 * Dirty & Deferred IO
564 */
565 par->dirty.x1 = par->dirty.x2 = 0;
566	par->dirty.y1 = par->dirty.y2 = 0;
567 par->dirty.active = true;
568 spin_lock_init(&par->dirty.lock);
569 info->fbdefio = &vmw_defio;
570 fb_deferred_io_init(info);
571
572 ret = register_framebuffer(info);
573 if (unlikely(ret != 0))
574 goto err_defio;
575
576 return 0;
577
578err_defio:
579 fb_deferred_io_cleanup(info);
580 ttm_bo_kunmap(&par->map);
581err_unref:
582 ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
583err_free:
584 vfree(par->vmalloc);
585 framebuffer_release(info);
586 vmw_priv->fb_info = NULL;
587
588 return ret;
589}
590
591int vmw_fb_close(struct vmw_private *vmw_priv)
592{
593 struct fb_info *info;
594 struct vmw_fb_par *par;
595 struct ttm_buffer_object *bo;
596
597 if (!vmw_priv->fb_info)
598 return 0;
599
600 info = vmw_priv->fb_info;
601 par = info->par;
602 bo = &par->vmw_bo->base;
603 par->vmw_bo = NULL;
604
605	/* Stop deferred I/O before unregistering the framebuffer. */
606 fb_deferred_io_cleanup(info);
607 unregister_framebuffer(info);
608
609 ttm_bo_kunmap(&par->map);
610 ttm_bo_unref(&bo);
611
612 vfree(par->vmalloc);
613 framebuffer_release(info);
614
615 return 0;
616}
617
618int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
619 struct vmw_dma_buffer *vmw_bo)
620{
621 struct ttm_buffer_object *bo = &vmw_bo->base;
622 int ret = 0;
623
624 ret = ttm_bo_reserve(bo, false, false, false, 0);
625 if (unlikely(ret != 0))
626 return ret;
627
628 ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
629 ttm_bo_unreserve(bo);
630
631 return ret;
632}
633
634int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
635 struct vmw_dma_buffer *vmw_bo)
636{
637 struct ttm_buffer_object *bo = &vmw_bo->base;
638 struct ttm_placement ne_placement = vmw_vram_ne_placement;
639 int ret = 0;
640
641 ne_placement.lpfn = bo->num_pages;
642
643	/* interruptible? */
644 ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
645 if (unlikely(ret != 0))
646 return ret;
647
648 ret = ttm_bo_reserve(bo, false, false, false, 0);
649 if (unlikely(ret != 0))
650 goto err_unlock;
651
652 if (vmw_bo->gmr_bound) {
653 vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
654 spin_lock(&bo->glob->lru_lock);
655 ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
656 spin_unlock(&bo->glob->lru_lock);
657		vmw_bo->gmr_bound = false;
658 }
659
660 ret = ttm_bo_validate(bo, &ne_placement, false, false);
661 ttm_bo_unreserve(bo);
662err_unlock:
663 ttm_write_unlock(&vmw_priv->active_master->lock);
664
665 return ret;
666}
667
668int vmw_fb_off(struct vmw_private *vmw_priv)
669{
670 struct fb_info *info;
671 struct vmw_fb_par *par;
672 unsigned long flags;
673
674 if (!vmw_priv->fb_info)
675 return -EINVAL;
676
677 info = vmw_priv->fb_info;
678 par = info->par;
679
680 spin_lock_irqsave(&par->dirty.lock, flags);
681 par->dirty.active = false;
682 spin_unlock_irqrestore(&par->dirty.lock, flags);
683
684 flush_scheduled_work();
685
686 par->bo_ptr = NULL;
687 ttm_bo_kunmap(&par->map);
688
689 vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
690
691 return 0;
692}
693
694int vmw_fb_on(struct vmw_private *vmw_priv)
695{
696 struct fb_info *info;
697 struct vmw_fb_par *par;
698 unsigned long flags;
699 bool dummy;
700 int ret;
701
702 if (!vmw_priv->fb_info)
703 return -EINVAL;
704
705 info = vmw_priv->fb_info;
706 par = info->par;
707
708 /* we are already active */
709 if (par->bo_ptr != NULL)
710 return 0;
711
712	/* Make sure that all overlays are stopped when we take over */
713 vmw_overlay_stop_all(vmw_priv);
714
715 ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
716 if (unlikely(ret != 0)) {
717		DRM_ERROR("Could not move buffer to start of VRAM.\n");
718 goto err_no_buffer;
719 }
720
721 ret = ttm_bo_kmap(&par->vmw_bo->base,
722 0,
723 par->vmw_bo->base.num_pages,
724 &par->map);
725 BUG_ON(ret != 0);
726 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
727
728 spin_lock_irqsave(&par->dirty.lock, flags);
729 par->dirty.active = true;
730 spin_unlock_irqrestore(&par->dirty.lock, flags);
731
732err_no_buffer:
733 vmw_fb_set_par(info);
734
735 vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
736
737	/* If there already was stuff dirty we won't
738	 * schedule new work, so let's do it now. */
739 schedule_delayed_work(&info->deferred_work, 0);
740
741 return 0;
742}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 000000000000..76b0693e2458
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,521 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_placement.h"
31
32int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t max;
36 uint32_t min;
37 uint32_t dummy;
38 int ret;
39
40 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
41 fifo->static_buffer = vmalloc(fifo->static_buffer_size);
42 if (unlikely(fifo->static_buffer == NULL))
43 return -ENOMEM;
44
45 fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
46 fifo->last_data_size = 0;
47 fifo->last_buffer_add = false;
48 fifo->last_buffer = vmalloc(fifo->last_buffer_size);
49 if (unlikely(fifo->last_buffer == NULL)) {
50 ret = -ENOMEM;
51 goto out_err;
52 }
53
54 fifo->dynamic_buffer = NULL;
55 fifo->reserved_size = 0;
56 fifo->using_bounce_buffer = false;
57
58 init_rwsem(&fifo->rwsem);
59
60 /*
61 * Allow mapping the first page read-only to user-space.
62 */
63
64 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
65 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
66 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
67
68 mutex_lock(&dev_priv->hw_mutex);
69 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
70 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
71 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
72
73 min = 4;
74 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
75 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
76 min <<= 2;
77
78 if (min < PAGE_SIZE)
79 min = PAGE_SIZE;
80
81 iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
82 iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
83 wmb();
84 iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
85 iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
86 iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
87 mb();
88
89 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
90 mutex_unlock(&dev_priv->hw_mutex);
91
92 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
93 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
94 fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
95
96 DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
97 (unsigned int) max,
98 (unsigned int) min,
99 (unsigned int) fifo->capabilities);
100
101 dev_priv->fence_seq = (uint32_t) -100;
102 dev_priv->last_read_sequence = (uint32_t) -100;
103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
104
105 return vmw_fifo_send_fence(dev_priv, &dummy);
106out_err:
107 vfree(fifo->static_buffer);
108 fifo->static_buffer = NULL;
109 return ret;
110}
111
112void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
113{
114 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
115
116 mutex_lock(&dev_priv->hw_mutex);
117
118 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
119 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
120 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
121 }
122
123 mutex_unlock(&dev_priv->hw_mutex);
124}
125
126void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
127{
128 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
129
130 mutex_lock(&dev_priv->hw_mutex);
131
132 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
133 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
134
135 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
136
137 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
138 dev_priv->config_done_state);
139 vmw_write(dev_priv, SVGA_REG_ENABLE,
140 dev_priv->enable_state);
141
142 mutex_unlock(&dev_priv->hw_mutex);
143
144 if (likely(fifo->last_buffer != NULL)) {
145 vfree(fifo->last_buffer);
146 fifo->last_buffer = NULL;
147 }
148
149 if (likely(fifo->static_buffer != NULL)) {
150 vfree(fifo->static_buffer);
151 fifo->static_buffer = NULL;
152 }
153
154 if (likely(fifo->dynamic_buffer != NULL)) {
155 vfree(fifo->dynamic_buffer);
156 fifo->dynamic_buffer = NULL;
157 }
158}
159
160static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
161{
162 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
163 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
164 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
165 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
166 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
167
168 return ((max - next_cmd) + (stop - min) <= bytes);
169}
170
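
The FIFO is a ring occupying [min, max); next_cmd is where the driver writes and stop is where the device reads. With next_cmd >= stop, the free bytes are the tail gap plus the reclaimed head gap, which is the sum tested above. A worked example:

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

/* Free bytes are the tail gap (max - next_cmd) plus the reclaimed head
 * gap (stop - min); "full" means the request won't fit. */
static bool fifo_is_full(uint32_t min, uint32_t max, uint32_t next_cmd,
			 uint32_t stop, uint32_t bytes)
{
	return (max - next_cmd) + (stop - min) <= bytes;
}

int main(void)
{
	/* 4 KiB ring at [4096, 8192): writer at 7168, device has consumed
	 * up to 5120, so 1024 + 1024 = 2048 bytes are free. */
	assert(!fifo_is_full(4096, 8192, 7168, 5120, 2047));
	assert(fifo_is_full(4096, 8192, 7168, 5120, 2048));
	return 0;
}
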
171static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
172 uint32_t bytes, bool interruptible,
173 unsigned long timeout)
174{
175 int ret = 0;
176 unsigned long end_jiffies = jiffies + timeout;
177 DEFINE_WAIT(__wait);
178
179 DRM_INFO("Fifo wait noirq.\n");
180
181 for (;;) {
182 prepare_to_wait(&dev_priv->fifo_queue, &__wait,
183 (interruptible) ?
184 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
185 if (!vmw_fifo_is_full(dev_priv, bytes))
186 break;
187 if (time_after_eq(jiffies, end_jiffies)) {
188 ret = -EBUSY;
189 DRM_ERROR("SVGA device lockup.\n");
190 break;
191 }
192 schedule_timeout(1);
193 if (interruptible && signal_pending(current)) {
194 ret = -ERESTART;
195 break;
196 }
197 }
198 finish_wait(&dev_priv->fifo_queue, &__wait);
199 wake_up_all(&dev_priv->fifo_queue);
200 DRM_INFO("Fifo noirq exit.\n");
201 return ret;
202}
203
204static int vmw_fifo_wait(struct vmw_private *dev_priv,
205 uint32_t bytes, bool interruptible,
206 unsigned long timeout)
207{
208 long ret = 1L;
209 unsigned long irq_flags;
210
211 if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
212 return 0;
213
214 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
215 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
216 return vmw_fifo_wait_noirq(dev_priv, bytes,
217 interruptible, timeout);
218
219 mutex_lock(&dev_priv->hw_mutex);
220 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
221 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
222 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
223 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
224 vmw_write(dev_priv, SVGA_REG_IRQMASK,
225 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
226 SVGA_IRQFLAG_FIFO_PROGRESS);
227 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
228 }
229 mutex_unlock(&dev_priv->hw_mutex);
230
231 if (interruptible)
232 ret = wait_event_interruptible_timeout
233 (dev_priv->fifo_queue,
234 !vmw_fifo_is_full(dev_priv, bytes), timeout);
235 else
236 ret = wait_event_timeout
237 (dev_priv->fifo_queue,
238 !vmw_fifo_is_full(dev_priv, bytes), timeout);
239
240 if (unlikely(ret == -ERESTARTSYS))
241 ret = -ERESTART;
242 else if (unlikely(ret == 0))
243 ret = -EBUSY;
244 else if (likely(ret > 0))
245 ret = 0;
246
247 mutex_lock(&dev_priv->hw_mutex);
248 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
249 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
250 vmw_write(dev_priv, SVGA_REG_IRQMASK,
251 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
252 ~SVGA_IRQFLAG_FIFO_PROGRESS);
253 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
254 }
255 mutex_unlock(&dev_priv->hw_mutex);
256
257 return ret;
258}
259
260void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
261{
262 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
263 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
264 uint32_t max;
265 uint32_t min;
266 uint32_t next_cmd;
267 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
268 int ret;
269
270 down_write(&fifo_state->rwsem);
271 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
272 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
273 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
274
275 if (unlikely(bytes >= (max - min)))
276 goto out_err;
277
278 BUG_ON(fifo_state->reserved_size != 0);
279 BUG_ON(fifo_state->dynamic_buffer != NULL);
280
281 fifo_state->reserved_size = bytes;
282
283 while (1) {
284 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
285 bool need_bounce = false;
286 bool reserve_in_place = false;
287
288 if (next_cmd >= stop) {
289 if (likely((next_cmd + bytes < max ||
290 (next_cmd + bytes == max && stop > min))))
291 reserve_in_place = true;
292
293 else if (vmw_fifo_is_full(dev_priv, bytes)) {
294 ret = vmw_fifo_wait(dev_priv, bytes,
295 false, 3 * HZ);
296 if (unlikely(ret != 0))
297 goto out_err;
298 } else
299 need_bounce = true;
300
301 } else {
302
303 if (likely((next_cmd + bytes < stop)))
304 reserve_in_place = true;
305 else {
306 ret = vmw_fifo_wait(dev_priv, bytes,
307 false, 3 * HZ);
308 if (unlikely(ret != 0))
309 goto out_err;
310 }
311 }
312
313 if (reserve_in_place) {
314 if (reserveable || bytes <= sizeof(uint32_t)) {
315 fifo_state->using_bounce_buffer = false;
316
317 if (reserveable)
318 iowrite32(bytes, fifo_mem +
319 SVGA_FIFO_RESERVED);
320 return fifo_mem + (next_cmd >> 2);
321 } else {
322 need_bounce = true;
323 }
324 }
325
326 if (need_bounce) {
327 fifo_state->using_bounce_buffer = true;
328 if (bytes < fifo_state->static_buffer_size)
329 return fifo_state->static_buffer;
330 else {
331 fifo_state->dynamic_buffer = vmalloc(bytes);
332 return fifo_state->dynamic_buffer;
333 }
334 }
335 }
336out_err:
337 fifo_state->reserved_size = 0;
338 up_write(&fifo_state->rwsem);
339 return NULL;
340}
341
342static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
343 __le32 __iomem *fifo_mem,
344 uint32_t next_cmd,
345 uint32_t max, uint32_t min, uint32_t bytes)
346{
347 uint32_t chunk_size = max - next_cmd;
348 uint32_t rest;
349 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
350 fifo_state->dynamic_buffer : fifo_state->static_buffer;
351
352 if (bytes < chunk_size)
353 chunk_size = bytes;
354
355 iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
356 mb();
357 memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
358 rest = bytes - chunk_size;
359 if (rest)
360 memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
361 rest);
362}
363
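
A bounce-buffered command may straddle the top of the ring, so it is copied as a tail chunk starting at next_cmd plus a remainder at min. The same split in a self-contained sketch over an ordinary byte array:

#include <string.h>
#include <stdint.h>

/* Copy 'bytes' from a bounce buffer into ring[0 .. max), starting at
 * next_cmd and wrapping to min, the way vmw_fifo_res_copy splits it. */
static void ring_copy(uint8_t *ring, uint32_t min, uint32_t max,
		      uint32_t next_cmd, const uint8_t *src, uint32_t bytes)
{
	uint32_t chunk = max - next_cmd;

	if (bytes < chunk)
		chunk = bytes;
	memcpy(ring + next_cmd, src, chunk);		/* tail of the ring */
	if (bytes > chunk)
		memcpy(ring + min, src + chunk, bytes - chunk);	/* wrapped rest */
}
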
364static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
365 __le32 __iomem *fifo_mem,
366 uint32_t next_cmd,
367 uint32_t max, uint32_t min, uint32_t bytes)
368{
369 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
370 fifo_state->dynamic_buffer : fifo_state->static_buffer;
371
372 while (bytes > 0) {
373 iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
374 next_cmd += sizeof(uint32_t);
375 if (unlikely(next_cmd == max))
376 next_cmd = min;
377 mb();
378 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
379 mb();
380 bytes -= sizeof(uint32_t);
381 }
382}
383
384void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
385{
386 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
387 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
388 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
389 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
390 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
391 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
392
393 BUG_ON((bytes & 3) != 0);
394 BUG_ON(bytes > fifo_state->reserved_size);
395
396 fifo_state->reserved_size = 0;
397
398 if (fifo_state->using_bounce_buffer) {
399 if (reserveable)
400 vmw_fifo_res_copy(fifo_state, fifo_mem,
401 next_cmd, max, min, bytes);
402 else
403 vmw_fifo_slow_copy(fifo_state, fifo_mem,
404 next_cmd, max, min, bytes);
405
406 if (fifo_state->dynamic_buffer) {
407 vfree(fifo_state->dynamic_buffer);
408 fifo_state->dynamic_buffer = NULL;
409 }
410
411 }
412
413 if (fifo_state->using_bounce_buffer || reserveable) {
414 next_cmd += bytes;
415 if (next_cmd >= max)
416 next_cmd -= max - min;
417 mb();
418 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
419 }
420
421 if (reserveable)
422 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
423 mb();
424 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
425 up_write(&fifo_state->rwsem);
426}
427
428int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
429{
430 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
431 struct svga_fifo_cmd_fence *cmd_fence;
432 void *fm;
433 int ret = 0;
434 uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
435
436 fm = vmw_fifo_reserve(dev_priv, bytes);
437 if (unlikely(fm == NULL)) {
438 down_write(&fifo_state->rwsem);
439 *sequence = dev_priv->fence_seq;
440 up_write(&fifo_state->rwsem);
441 ret = -ENOMEM;
442 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
443 false, 3*HZ);
444 goto out_err;
445 }
446
447 do {
448 *sequence = dev_priv->fence_seq++;
449 } while (*sequence == 0);
450
451 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
452
453 /*
454 * Don't request hardware to send a fence. The
455 * waiting code in vmwgfx_irq.c will emulate this.
456 */
457
458 vmw_fifo_commit(dev_priv, 0);
459 return 0;
460 }
461
462 *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
463 cmd_fence = (struct svga_fifo_cmd_fence *)
464 ((unsigned long)fm + sizeof(__le32));
465
466 iowrite32(*sequence, &cmd_fence->fence);
467 fifo_state->last_buffer_add = true;
468 vmw_fifo_commit(dev_priv, bytes);
469 fifo_state->last_buffer_add = false;
470
471out_err:
472 return ret;
473}
474
475/**
476 * Map the first page of the FIFO read-only to user-space.
477 */
478
479static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
480{
481 int ret;
482 unsigned long address = (unsigned long)vmf->virtual_address;
483
484 if (address != vma->vm_start)
485 return VM_FAULT_SIGBUS;
486
487 ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
488 if (likely(ret == -EBUSY || ret == 0))
489 return VM_FAULT_NOPAGE;
490 else if (ret == -ENOMEM)
491 return VM_FAULT_OOM;
492
493 return VM_FAULT_SIGBUS;
494}
495
496static struct vm_operations_struct vmw_fifo_vm_ops = {
497 .fault = vmw_fifo_vm_fault,
498 .open = NULL,
499 .close = NULL
500};
501
502int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
503{
504 struct drm_file *file_priv;
505 struct vmw_private *dev_priv;
506
507 file_priv = (struct drm_file *)filp->private_data;
508 dev_priv = vmw_priv(file_priv->minor->dev);
509
510 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
511 (vma->vm_end - vma->vm_start) != PAGE_SIZE)
512 return -EINVAL;
513
514 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
515 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
516 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
517 vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
518 vma->vm_page_prot);
519 vma->vm_ops = &vmw_fifo_vm_ops;
520 return 0;
521}
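
User space obtains the FIFO offset with the DRM_VMW_PARAM_FIFO_OFFSET query (see vmwgfx_ioctl.c below) and may then map exactly one read-only page at that offset. A hedged sketch of the caller side:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map the read-only first FIFO page; 'fifo_offset' comes from the
 * DRM_VMW_PARAM_FIFO_OFFSET query, and per vmw_fifo_mmap above the
 * mapping must start there and be exactly one page long. */
static const volatile uint32_t *map_fifo_page(int fd, uint64_t fifo_offset)
{
	void *p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
		       fd, (off_t)fifo_offset);

	return p == MAP_FAILED ? NULL : p;
}
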
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 000000000000..5f8908a5d7fd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_bo_driver.h"
31
32/**
33 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
34 * the number of used descriptors.
35 */
36
37static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
38 struct page *pages[],
39 unsigned long num_pages)
40{
41 struct page *page, *next;
42 struct svga_guest_mem_descriptor *page_virtual = NULL;
43 struct svga_guest_mem_descriptor *desc_virtual = NULL;
44 unsigned int desc_per_page;
45 unsigned long prev_pfn;
46 unsigned long pfn;
47 int ret;
48
49 desc_per_page = PAGE_SIZE /
50 sizeof(struct svga_guest_mem_descriptor) - 1;
51
52 while (likely(num_pages != 0)) {
53 page = alloc_page(__GFP_HIGHMEM);
54 if (unlikely(page == NULL)) {
55 ret = -ENOMEM;
56 goto out_err;
57 }
58
59 list_add_tail(&page->lru, desc_pages);
60
61 /*
62 * Point previous page terminating descriptor to this
63 * page before unmapping it.
64 */
65
66 if (likely(page_virtual != NULL)) {
67 desc_virtual->ppn = page_to_pfn(page);
68 kunmap_atomic(page_virtual, KM_USER0);
69 }
70
71 page_virtual = kmap_atomic(page, KM_USER0);
72 desc_virtual = page_virtual - 1;
73 prev_pfn = ~(0UL);
74
75 while (likely(num_pages != 0)) {
76 pfn = page_to_pfn(*pages);
77
78 if (pfn != prev_pfn + 1) {
79
80 if (desc_virtual - page_virtual ==
81 desc_per_page - 1)
82 break;
83
84 (++desc_virtual)->ppn = cpu_to_le32(pfn);
85 desc_virtual->num_pages = cpu_to_le32(1);
86 } else {
87 uint32_t tmp =
88 le32_to_cpu(desc_virtual->num_pages);
89 desc_virtual->num_pages = cpu_to_le32(tmp + 1);
90 }
91 prev_pfn = pfn;
92 --num_pages;
93 ++pages;
94 }
95
96 (++desc_virtual)->ppn = cpu_to_le32(0);
97 desc_virtual->num_pages = cpu_to_le32(0);
98 }
99
100 if (likely(page_virtual != NULL))
101 kunmap_atomic(page_virtual, KM_USER0);
102
103 return 0;
104out_err:
105 list_for_each_entry_safe(page, next, desc_pages, lru) {
106 list_del_init(&page->lru);
107 __free_page(page);
108 }
109 return ret;
110}
111
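
Each descriptor is a (ppn, num_pages) run. Within the chained pages built above, an entry with num_pages == 0 and a nonzero ppn links to the next descriptor page, while ppn == 0 terminates the list. A flat-array sketch of the run encoding, leaving the page chaining out:

#include <stdint.h>

struct gmr_desc {		/* mirrors svga_guest_mem_descriptor */
	uint32_t ppn;		/* first page-frame number of a run */
	uint32_t num_pages;	/* run length; 0 marks a link or the end */
};

/* Encode runs of contiguous pfns and terminate the list. Returns the
 * number of run descriptors written (the terminator is not counted). */
static unsigned encode(struct gmr_desc *d, const unsigned long *pfn,
		       unsigned long n)
{
	unsigned out = 0;
	unsigned long prev = 0;
	int first = 1;

	for (unsigned long i = 0; i < n; ++i) {
		if (first || pfn[i] != prev + 1) {
			d[out].ppn = (uint32_t)pfn[i];	/* new run */
			d[out].num_pages = 1;
			++out;
			first = 0;
		} else {
			d[out - 1].num_pages++;		/* extend the run */
		}
		prev = pfn[i];
	}
	d[out].ppn = 0;			/* ppn == 0, num_pages == 0: end */
	d[out].num_pages = 0;
	return out;
}
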
112static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
113{
114 struct page *page, *next;
115
116 list_for_each_entry_safe(page, next, desc_pages, lru) {
117 list_del_init(&page->lru);
118 __free_page(page);
119 }
120}
121
122static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
123 int gmr_id, struct list_head *desc_pages)
124{
125 struct page *page;
126
127 if (unlikely(list_empty(desc_pages)))
128 return;
129
130 page = list_entry(desc_pages->next, struct page, lru);
131
132 mutex_lock(&dev_priv->hw_mutex);
133
134 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
135 wmb();
136 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
137 mb();
138
139 mutex_unlock(&dev_priv->hw_mutex);
140
141}
142
143/**
144 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
145 * the number of used descriptors.
146 */
147
148static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
149 unsigned long num_pages)
150{
151 unsigned long prev_pfn = ~(0UL);
152 unsigned long pfn;
153 unsigned long descriptors = 0;
154
155 while (num_pages--) {
156 pfn = page_to_pfn(*pages++);
157 if (prev_pfn + 1 != pfn)
158 ++descriptors;
159 prev_pfn = pfn;
160 }
161
162 return descriptors;
163}
164
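
One descriptor is needed per run of physically contiguous pages, so the count is simply the number of breaks in the pfn sequence. A worked example:

#include <assert.h>

/* Count runs of consecutive page-frame numbers; each run becomes one
 * GMR descriptor, so pfns {10,11,12,40,41,99} need three descriptors. */
static unsigned long count_runs(const unsigned long *pfn, unsigned long n)
{
	unsigned long prev = ~0UL, runs = 0;

	while (n--) {
		if (*pfn != prev + 1)
			++runs;
		prev = *pfn++;
	}
	return runs;
}

int main(void)
{
	unsigned long pfns[] = { 10, 11, 12, 40, 41, 99 };

	assert(count_runs(pfns, 6) == 3);
	return 0;
}
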
165int vmw_gmr_bind(struct vmw_private *dev_priv,
166 struct ttm_buffer_object *bo)
167{
168 struct ttm_tt *ttm = bo->ttm;
169 unsigned long descriptors;
170 int ret;
171 uint32_t id;
172 struct list_head desc_pages;
173
174 if (!(dev_priv->capabilities & SVGA_CAP_GMR))
175 return -EINVAL;
176
177 ret = ttm_tt_populate(ttm);
178 if (unlikely(ret != 0))
179 return ret;
180
181 descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
182 if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
183 return -EINVAL;
184
185 INIT_LIST_HEAD(&desc_pages);
186 ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
187 ttm->num_pages);
188 if (unlikely(ret != 0))
189 return ret;
190
191 ret = vmw_gmr_id_alloc(dev_priv, &id);
192 if (unlikely(ret != 0))
193 goto out_no_id;
194
195 vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
196 vmw_gmr_free_descriptors(&desc_pages);
197 vmw_dmabuf_set_gmr(bo, id);
198 return 0;
199
200out_no_id:
201 vmw_gmr_free_descriptors(&desc_pages);
202 return ret;
203}
204
205void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
206{
207 mutex_lock(&dev_priv->hw_mutex);
208 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
209 wmb();
210 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
211 mb();
212 mutex_unlock(&dev_priv->hw_mutex);
213}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 000000000000..5fa6a4ed238a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,81 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30
31int vmw_getparam_ioctl(struct drm_device *dev, void *data,
32 struct drm_file *file_priv)
33{
34 struct vmw_private *dev_priv = vmw_priv(dev);
35 struct drm_vmw_getparam_arg *param =
36 (struct drm_vmw_getparam_arg *)data;
37
38 switch (param->param) {
39 case DRM_VMW_PARAM_NUM_STREAMS:
40 param->value = vmw_overlay_num_overlays(dev_priv);
41 break;
42 case DRM_VMW_PARAM_NUM_FREE_STREAMS:
43 param->value = vmw_overlay_num_free_overlays(dev_priv);
44 break;
45 case DRM_VMW_PARAM_3D:
46 param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
47 break;
48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start;
50 break;
51 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param);
54 return -EINVAL;
55 }
56
57 return 0;
58}
59
60int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
61 struct drm_file *file_priv)
62{
63 struct vmw_private *dev_priv = vmw_priv(dev);
64 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
65 struct drm_vmw_fifo_debug_arg *arg =
66 (struct drm_vmw_fifo_debug_arg *)data;
67 __le32 __user *buffer = (__le32 __user *)
68 (unsigned long)arg->debug_buffer;
69
70 if (unlikely(fifo_state->last_buffer == NULL))
71 return -EINVAL;
72
73 if (arg->debug_buffer_size < fifo_state->last_data_size) {
74 arg->used_size = arg->debug_buffer_size;
75 arg->did_not_fit = 1;
76 } else {
77 arg->used_size = fifo_state->last_data_size;
78 arg->did_not_fit = 0;
79 }
80	if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
		return -EFAULT;	/* don't return the "bytes not copied" count */
	return 0;
81}
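
A user-space caller issues the parameter query through libdrm. A sketch assuming drmCommandWriteRead() and the definitions from vmwgfx_drm.h, with the field layout approximated:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() from libdrm */
#include "vmwgfx_drm.h"		/* DRM_VMW_GET_PARAM and the arg struct */

/* Ask whether the device exposes 3D; the same call works for
 * DRM_VMW_PARAM_NUM_STREAMS and the other parameters above. */
static int vmw_has_3d(int fd)
{
	struct drm_vmw_getparam_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.param = DRM_VMW_PARAM_3D;
	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
		return 0;
	return arg.value != 0;
}
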
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 000000000000..9e0f0306eedb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,295 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31#define VMW_FENCE_WRAP (1 << 24)
32
33irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
34{
35 struct drm_device *dev = (struct drm_device *)arg;
36 struct vmw_private *dev_priv = vmw_priv(dev);
37 uint32_t status;
38
39 spin_lock(&dev_priv->irq_lock);
40 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
41 spin_unlock(&dev_priv->irq_lock);
42
43 if (status & SVGA_IRQFLAG_ANY_FENCE)
44 wake_up_all(&dev_priv->fence_queue);
45 if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
46 wake_up_all(&dev_priv->fifo_queue);
47
48 if (likely(status)) {
49 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
50 return IRQ_HANDLED;
51 }
52
53 return IRQ_NONE;
54}
55
56static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
57{
58 uint32_t busy;
59
60 mutex_lock(&dev_priv->hw_mutex);
61 busy = vmw_read(dev_priv, SVGA_REG_BUSY);
62 mutex_unlock(&dev_priv->hw_mutex);
63
64 return (busy == 0);
65}
66
67
68bool vmw_fence_signaled(struct vmw_private *dev_priv,
69 uint32_t sequence)
70{
71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
72 struct vmw_fifo_state *fifo_state;
73 bool ret;
74
75 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
76 return true;
77
78 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
79 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
80 return true;
81
82 fifo_state = &dev_priv->fifo;
83 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
84 vmw_fifo_idle(dev_priv, sequence))
85 return true;
86
87 /**
 88	 * The code below signals stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
 95	 * Then check whether the sequence is higher than what we've
 96	 * actually emitted. If so, the fence is stale and thus signaled.
97 */
98
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
100 up_read(&fifo_state->rwsem);
101
102 return ret;
103}
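
The signaled test above deliberately uses unsigned modular subtraction rather than a plain ordering comparison, so it stays correct when the 32-bit sequence counter wraps past zero: a fence counts as passed when it lies less than VMW_FENCE_WRAP behind the last sequence read back. A minimal stand-alone sketch of the same check (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define VMW_FENCE_WRAP (1u << 24)

/* Signaled when the fence lies less than VMW_FENCE_WRAP behind the
 * last sequence read from the device, in modular 32-bit arithmetic. */
static int fence_signaled(uint32_t last_read, uint32_t sequence)
{
	return (uint32_t)(last_read - sequence) < VMW_FENCE_WRAP;
}

int main(void)
{
	printf("%d\n", fence_signaled(100, 90));        /* 1: already passed */
	printf("%d\n", fence_signaled(90, 100));        /* 0: still pending */
	printf("%d\n", fence_signaled(5, 0xfffffff0u)); /* 1: counter wrapped */
	return 0;
}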
104
105int vmw_fallback_wait(struct vmw_private *dev_priv,
106 bool lazy,
107 bool fifo_idle,
108 uint32_t sequence,
109 bool interruptible,
110 unsigned long timeout)
111{
112 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
113
114 uint32_t count = 0;
115 uint32_t signal_seq;
116 int ret;
117 unsigned long end_jiffies = jiffies + timeout;
118 bool (*wait_condition)(struct vmw_private *, uint32_t);
119 DEFINE_WAIT(__wait);
120
121 wait_condition = (fifo_idle) ? &vmw_fifo_idle :
122 &vmw_fence_signaled;
123
124 /**
125 * Block command submission while waiting for idle.
126 */
127
128 if (fifo_idle)
129 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq;
131 ret = 0;
132
133 for (;;) {
134 prepare_to_wait(&dev_priv->fence_queue, &__wait,
135 (interruptible) ?
136 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
137 if (wait_condition(dev_priv, sequence))
138 break;
139 if (time_after_eq(jiffies, end_jiffies)) {
140 DRM_ERROR("SVGA device lockup.\n");
141 break;
142 }
143 if (lazy)
144 schedule_timeout(1);
145 else if ((++count & 0x0F) == 0) {
146 /**
147			 * FIXME: Use schedule_hrtimeout here for
148 * newer kernels and lower CPU utilization.
149 */
150
151 __set_current_state(TASK_RUNNING);
152 schedule();
153 __set_current_state((interruptible) ?
154 TASK_INTERRUPTIBLE :
155 TASK_UNINTERRUPTIBLE);
156 }
157 if (interruptible && signal_pending(current)) {
158 ret = -ERESTART;
159 break;
160 }
161 }
162 finish_wait(&dev_priv->fence_queue, &__wait);
163 if (ret == 0 && fifo_idle) {
164 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
165 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
166 }
167 wake_up_all(&dev_priv->fence_queue);
168 if (fifo_idle)
169 up_read(&fifo_state->rwsem);
170
171 return ret;
172}
173
174int vmw_wait_fence(struct vmw_private *dev_priv,
175 bool lazy, uint32_t sequence,
176 bool interruptible, unsigned long timeout)
177{
178 long ret;
179 unsigned long irq_flags;
180 struct vmw_fifo_state *fifo = &dev_priv->fifo;
181
182 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
183 return 0;
184
185 if (likely(vmw_fence_signaled(dev_priv, sequence)))
186 return 0;
187
188 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
189
190 if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
191 return vmw_fallback_wait(dev_priv, lazy, true, sequence,
192 interruptible, timeout);
193
194 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
195 return vmw_fallback_wait(dev_priv, lazy, false, sequence,
196 interruptible, timeout);
197
198 mutex_lock(&dev_priv->hw_mutex);
199 if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
200 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
201 outl(SVGA_IRQFLAG_ANY_FENCE,
202 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
203 vmw_write(dev_priv, SVGA_REG_IRQMASK,
204 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
205 SVGA_IRQFLAG_ANY_FENCE);
206 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
207 }
208 mutex_unlock(&dev_priv->hw_mutex);
209
210 if (interruptible)
211 ret = wait_event_interruptible_timeout
212 (dev_priv->fence_queue,
213 vmw_fence_signaled(dev_priv, sequence),
214 timeout);
215 else
216 ret = wait_event_timeout
217 (dev_priv->fence_queue,
218 vmw_fence_signaled(dev_priv, sequence),
219 timeout);
220
221 if (unlikely(ret == -ERESTARTSYS))
222 ret = -ERESTART;
223 else if (unlikely(ret == 0))
224 ret = -EBUSY;
225 else if (likely(ret > 0))
226 ret = 0;
227
228 mutex_lock(&dev_priv->hw_mutex);
229 if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
230 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
231 vmw_write(dev_priv, SVGA_REG_IRQMASK,
232 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
233 ~SVGA_IRQFLAG_ANY_FENCE);
234 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
235 }
236 mutex_unlock(&dev_priv->hw_mutex);
237
238 return ret;
239}
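
vmw_wait_fence() above brackets the wait with a waiter count so that the fence interrupt is only unmasked while someone is actually waiting (the driver re-arms the mask for every new waiter under hw_mutex and protects the register update with irq_lock; both are elided here). A stand-alone model of the first-on/last-off idea, with hw_read()/hw_write() as hypothetical stand-ins for vmw_read()/vmw_write() of SVGA_REG_IRQMASK:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IRQFLAG_ANY_FENCE (1u << 0)

static uint32_t irqmask;          /* models the SVGA_REG_IRQMASK register */
static atomic_int fence_waiters;

static uint32_t hw_read(void)    { return irqmask; }
static void hw_write(uint32_t v) { irqmask = v; }

static void fence_wait_begin(void)
{
	/* First waiter unmasks the fence interrupt. */
	if (atomic_fetch_add(&fence_waiters, 1) == 0)
		hw_write(hw_read() | IRQFLAG_ANY_FENCE);
}

static void fence_wait_end(void)
{
	/* Last waiter masks it again. */
	if (atomic_fetch_sub(&fence_waiters, 1) == 1)
		hw_write(hw_read() & ~IRQFLAG_ANY_FENCE);
}

int main(void)
{
	fence_wait_begin();
	fence_wait_begin();
	printf("mask=%u waiters=%d\n", irqmask, atomic_load(&fence_waiters));
	fence_wait_end();
	fence_wait_end();
	printf("mask=%u waiters=%d\n", irqmask, atomic_load(&fence_waiters));
	return 0;
}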
240
241void vmw_irq_preinstall(struct drm_device *dev)
242{
243 struct vmw_private *dev_priv = vmw_priv(dev);
244 uint32_t status;
245
246 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
247 return;
248
249 spin_lock_init(&dev_priv->irq_lock);
250 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
251 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
252}
253
254int vmw_irq_postinstall(struct drm_device *dev)
255{
256 return 0;
257}
258
259void vmw_irq_uninstall(struct drm_device *dev)
260{
261 struct vmw_private *dev_priv = vmw_priv(dev);
262 uint32_t status;
263
264 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
265 return;
266
267 mutex_lock(&dev_priv->hw_mutex);
268 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
269 mutex_unlock(&dev_priv->hw_mutex);
270
271 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
272 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
273}
274
275#define VMW_FENCE_WAIT_TIMEOUT	(3 * HZ)
276
277int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
278 struct drm_file *file_priv)
279{
280 struct drm_vmw_fence_wait_arg *arg =
281 (struct drm_vmw_fence_wait_arg *)data;
282 unsigned long timeout;
283
284 if (!arg->cookie_valid) {
285 arg->cookie_valid = 1;
286 arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
287 }
288
289 timeout = jiffies;
290 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
291 return -EBUSY;
292
293 timeout = (unsigned long)arg->kernel_cookie - timeout;
294 return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
295}
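
The cookie scheme in vmw_fence_wait_ioctl() stamps an absolute deadline into the userspace-visible arg on first entry, so when the wait is interrupted by a signal and the ioctl is restarted, the remaining budget shrinks instead of resetting to a fresh 3 seconds. A userspace-style model of that bookkeeping (hypothetical names; it ignores jiffies wraparound, which the driver covers with time_after_eq()):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000u
#define FENCE_WAIT_TIMEOUT (3 * HZ)

struct fence_wait_arg {
	uint64_t kernel_cookie;  /* absolute deadline, in jiffies */
	int cookie_valid;
};

static unsigned long jiffies;    /* models the kernel tick counter */

/* Returns the remaining wait budget in jiffies, or -1 when expired. */
static long wait_budget(struct fence_wait_arg *arg)
{
	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + FENCE_WAIT_TIMEOUT;
	}
	if (jiffies >= arg->kernel_cookie)
		return -1;
	return (long)(arg->kernel_cookie - jiffies);
}

int main(void)
{
	struct fence_wait_arg arg = {0};

	jiffies = 1000;
	printf("%ld\n", wait_budget(&arg)); /* 3000: full budget */
	jiffies = 3500;                     /* ioctl restarted after a signal */
	printf("%ld\n", wait_budget(&arg)); /* 500: same deadline as before */
	jiffies = 4200;
	printf("%ld\n", wait_budget(&arg)); /* -1: deadline has passed */
	return 0;
}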
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 000000000000..e9403be446fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,872 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30/* Might need a hrtimer here? */
31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
32
33
34void vmw_display_unit_cleanup(struct vmw_display_unit *du)
35{
36 if (du->cursor_surface)
37 vmw_surface_unreference(&du->cursor_surface);
38 if (du->cursor_dmabuf)
39 vmw_dmabuf_unreference(&du->cursor_dmabuf);
40 drm_crtc_cleanup(&du->crtc);
41 drm_encoder_cleanup(&du->encoder);
42 drm_connector_cleanup(&du->connector);
43}
44
45/*
46 * Display Unit Cursor functions
47 */
48
49int vmw_cursor_update_image(struct vmw_private *dev_priv,
50 u32 *image, u32 width, u32 height,
51 u32 hotspotX, u32 hotspotY)
52{
53 struct {
54 u32 cmd;
55 SVGAFifoCmdDefineAlphaCursor cursor;
56 } *cmd;
57 u32 image_size = width * height * 4;
58 u32 cmd_size = sizeof(*cmd) + image_size;
59
60 if (!image)
61 return -EINVAL;
62
63 cmd = vmw_fifo_reserve(dev_priv, cmd_size);
64 if (unlikely(cmd == NULL)) {
65 DRM_ERROR("Fifo reserve failed.\n");
66 return -ENOMEM;
67 }
68
69 memset(cmd, 0, sizeof(*cmd));
70
71 memcpy(&cmd[1], image, image_size);
72
73 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
74 cmd->cursor.id = cpu_to_le32(0);
75 cmd->cursor.width = cpu_to_le32(width);
76 cmd->cursor.height = cpu_to_le32(height);
77 cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
78 cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
79
80 vmw_fifo_commit(dev_priv, cmd_size);
81
82 return 0;
83}
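
vmw_cursor_update_image() shows the usual shape of a variable-size FIFO command: reserve header plus payload in one call, fill in the fixed header, copy the pixel data directly behind the struct via &cmd[1], then commit the same byte count. A stand-alone sketch of that layout, with fifo_reserve()/fifo_commit() as hypothetical stand-ins for the driver's FIFO helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cursor_cmd {
	uint32_t cmd_id;
	uint32_t width, height;
};

static void *fifo_reserve(size_t bytes) { return malloc(bytes); }
static void fifo_commit(void *p, size_t bytes)
{
	printf("committed %zu bytes\n", bytes);
	free(p);
}

int main(void)
{
	uint32_t image[4 * 4];              /* tiny 4x4 ARGB cursor */
	size_t image_size = sizeof(image);
	struct cursor_cmd *cmd;

	memset(image, 0xff, image_size);
	cmd = fifo_reserve(sizeof(*cmd) + image_size);
	if (!cmd)
		return 1;

	cmd->cmd_id = 1;                    /* hypothetical command id */
	cmd->width = 4;
	cmd->height = 4;
	memcpy(&cmd[1], image, image_size); /* payload follows the header */

	fifo_commit(cmd, sizeof(*cmd) + image_size);
	return 0;
}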
84
85void vmw_cursor_update_position(struct vmw_private *dev_priv,
86 bool show, int x, int y)
87{
88 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
89 uint32_t count;
90
91 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
92 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
93 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
94 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
95 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
96}
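
The position update above relies on write ordering: the on/x/y values go out first and SVGA_FIFO_CURSOR_COUNT is bumped last, so the device can treat a count change as "new coordinates are ready". A tiny model with an array standing in for the mmio FIFO window (register indices are hypothetical):

#include <stdint.h>
#include <stdio.h>

enum { CURSOR_ON, CURSOR_X, CURSOR_Y, CURSOR_COUNT, NREGS };

static uint32_t fifo[NREGS];       /* models the mmio FIFO window */

static void cursor_update_position(int show, int x, int y)
{
	fifo[CURSOR_ON] = show ? 1 : 0;
	fifo[CURSOR_X] = (uint32_t)x;
	fifo[CURSOR_Y] = (uint32_t)y;
	fifo[CURSOR_COUNT]++;      /* written last: publishes the update */
}

int main(void)
{
	cursor_update_position(1, 320, 240);
	printf("on=%u x=%u y=%u count=%u\n",
	       fifo[CURSOR_ON], fifo[CURSOR_X],
	       fifo[CURSOR_Y], fifo[CURSOR_COUNT]);
	return 0;
}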
97
98int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
99 uint32_t handle, uint32_t width, uint32_t height)
100{
101 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
102 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
103 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
104 struct vmw_surface *surface = NULL;
105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret;
107
108 if (handle) {
109 ret = vmw_user_surface_lookup(dev_priv, tfile,
110 handle, &surface);
111 if (!ret) {
112 if (!surface->snooper.image) {
113 DRM_ERROR("surface not suitable for cursor\n");
114 return -EINVAL;
115 }
116 } else {
117 ret = vmw_user_dmabuf_lookup(tfile,
118 handle, &dmabuf);
119 if (ret) {
120 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
121 return -EINVAL;
122 }
123 }
124 }
125
126 /* takedown old cursor */
127 if (du->cursor_surface) {
128 du->cursor_surface->snooper.crtc = NULL;
129 vmw_surface_unreference(&du->cursor_surface);
130 }
131 if (du->cursor_dmabuf)
132 vmw_dmabuf_unreference(&du->cursor_dmabuf);
133
134 /* setup new image */
135 if (surface) {
136 /* vmw_user_surface_lookup takes one reference */
137 du->cursor_surface = surface;
138
139 du->cursor_surface->snooper.crtc = crtc;
140 du->cursor_age = du->cursor_surface->snooper.age;
141 vmw_cursor_update_image(dev_priv, surface->snooper.image,
142 64, 64, du->hotspot_x, du->hotspot_y);
143 } else if (dmabuf) {
144 struct ttm_bo_kmap_obj map;
145 unsigned long kmap_offset;
146 unsigned long kmap_num;
147 void *virtual;
148 bool dummy;
149
150		/* vmw_user_dmabuf_lookup takes one reference */
151 du->cursor_dmabuf = dmabuf;
152
153 kmap_offset = 0;
154 kmap_num = (64*64*4) >> PAGE_SHIFT;
155
156 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
157 if (unlikely(ret != 0)) {
158 DRM_ERROR("reserve failed\n");
159 return -EINVAL;
160 }
161
162 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
163 if (unlikely(ret != 0))
164 goto err_unreserve;
165
166 virtual = ttm_kmap_obj_virtual(&map, &dummy);
167 vmw_cursor_update_image(dev_priv, virtual, 64, 64,
168 du->hotspot_x, du->hotspot_y);
169
170 ttm_bo_kunmap(&map);
171err_unreserve:
172 ttm_bo_unreserve(&dmabuf->base);
173
174 } else {
175 vmw_cursor_update_position(dev_priv, false, 0, 0);
176 return 0;
177 }
178
179 vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
180
181 return 0;
182}
183
184int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
185{
186 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
187 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
188 bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
189
190 du->cursor_x = x + crtc->x;
191 du->cursor_y = y + crtc->y;
192
193 vmw_cursor_update_position(dev_priv, shown,
194 du->cursor_x, du->cursor_y);
195
196 return 0;
197}
198
199void vmw_kms_cursor_snoop(struct vmw_surface *srf,
200 struct ttm_object_file *tfile,
201 struct ttm_buffer_object *bo,
202 SVGA3dCmdHeader *header)
203{
204 struct ttm_bo_kmap_obj map;
205 unsigned long kmap_offset;
206 unsigned long kmap_num;
207 SVGA3dCopyBox *box;
208 unsigned box_count;
209 void *virtual;
210 bool dummy;
211 struct vmw_dma_cmd {
212 SVGA3dCmdHeader header;
213 SVGA3dCmdSurfaceDMA dma;
214 } *cmd;
215 int ret;
216
217 cmd = container_of(header, struct vmw_dma_cmd, header);
218
219 /* No snooper installed */
220 if (!srf->snooper.image)
221 return;
222
223 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
224		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
225 return;
226 }
227
228 if (cmd->header.size < 64) {
229 DRM_ERROR("at least one full copy box must be given\n");
230 return;
231 }
232
233 box = (SVGA3dCopyBox *)&cmd[1];
234 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
235 sizeof(SVGA3dCopyBox);
236
237 if (cmd->dma.guest.pitch != (64 * 4) ||
238 cmd->dma.guest.ptr.offset % PAGE_SIZE ||
239 box->x != 0 || box->y != 0 || box->z != 0 ||
240 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
241 box->w != 64 || box->h != 64 || box->d != 1 ||
242 box_count != 1) {
243		/* TODO handle non-page-aligned offsets */
244		/* TODO handle partial uploads and pitch != 256 */
245		/* TODO handle more than one copy (size != 64) */
246		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
247 return;
248 }
249
250 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
251 kmap_num = (64*64*4) >> PAGE_SHIFT;
252
253 ret = ttm_bo_reserve(bo, true, false, false, 0);
254 if (unlikely(ret != 0)) {
255 DRM_ERROR("reserve failed\n");
256 return;
257 }
258
259 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
260 if (unlikely(ret != 0))
261 goto err_unreserve;
262
263 virtual = ttm_kmap_obj_virtual(&map, &dummy);
264
265 memcpy(srf->snooper.image, virtual, 64*64*4);
266 srf->snooper.age++;
267
268	/* We can't update the cursor image from here, since execbuf
269	 * has already reserved fifo space.
270 *
271 * if (srf->snooper.crtc)
272 * vmw_ldu_crtc_cursor_update_image(dev_priv,
273 * srf->snooper.image, 64, 64,
274 * du->hotspot_x, du->hotspot_y);
275 */
276
277 ttm_bo_kunmap(&map);
278err_unreserve:
279 ttm_bo_unreserve(bo);
280}
281
282void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
283{
284 struct drm_device *dev = dev_priv->dev;
285 struct vmw_display_unit *du;
286 struct drm_crtc *crtc;
287
288 mutex_lock(&dev->mode_config.mutex);
289
290 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
291 du = vmw_crtc_to_du(crtc);
292 if (!du->cursor_surface ||
293 du->cursor_age == du->cursor_surface->snooper.age)
294 continue;
295
296 du->cursor_age = du->cursor_surface->snooper.age;
297 vmw_cursor_update_image(dev_priv,
298 du->cursor_surface->snooper.image,
299 64, 64, du->hotspot_x, du->hotspot_y);
300 }
301
302 mutex_unlock(&dev->mode_config.mutex);
303}
304
305/*
306 * Generic framebuffer code
307 */
308
309int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
310 struct drm_file *file_priv,
311 unsigned int *handle)
312{
313	if (handle)
314		*handle = 0;
315
316 return 0;
317}
318
319/*
320 * Surface framebuffer code
321 */
322
323#define vmw_framebuffer_to_vfbs(x) \
324 container_of(x, struct vmw_framebuffer_surface, base.base)
325
326struct vmw_framebuffer_surface {
327 struct vmw_framebuffer base;
328 struct vmw_surface *surface;
329 struct delayed_work d_work;
330 struct mutex work_lock;
331 bool present_fs;
332};
333
334void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
335{
336 struct vmw_framebuffer_surface *vfb =
337 vmw_framebuffer_to_vfbs(framebuffer);
338
339 cancel_delayed_work_sync(&vfb->d_work);
340 drm_framebuffer_cleanup(framebuffer);
341 vmw_surface_unreference(&vfb->surface);
342
343 kfree(framebuffer);
344}
345
346static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
347{
348 struct delayed_work *d_work =
349 container_of(work, struct delayed_work, work);
350 struct vmw_framebuffer_surface *vfbs =
351 container_of(d_work, struct vmw_framebuffer_surface, d_work);
352 struct vmw_surface *surf = vfbs->surface;
353 struct drm_framebuffer *framebuffer = &vfbs->base.base;
354 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
355
356 struct {
357 SVGA3dCmdHeader header;
358 SVGA3dCmdPresent body;
359 SVGA3dCopyRect cr;
360 } *cmd;
361
362 mutex_lock(&vfbs->work_lock);
363 if (!vfbs->present_fs)
364 goto out_unlock;
365
366 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
367 if (unlikely(cmd == NULL))
368 goto out_resched;
369
370 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
371 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
372 cmd->body.sid = cpu_to_le32(surf->res.id);
373 cmd->cr.x = cpu_to_le32(0);
374 cmd->cr.y = cpu_to_le32(0);
375 cmd->cr.srcx = cmd->cr.x;
376 cmd->cr.srcy = cmd->cr.y;
377 cmd->cr.w = cpu_to_le32(framebuffer->width);
378 cmd->cr.h = cpu_to_le32(framebuffer->height);
379 vfbs->present_fs = false;
380 vmw_fifo_commit(dev_priv, sizeof(*cmd));
381out_resched:
382 /**
383 * Will not re-add if already pending.
384 */
385 schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
386out_unlock:
387 mutex_unlock(&vfbs->work_lock);
388}
389
390
391int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
392 unsigned flags, unsigned color,
393 struct drm_clip_rect *clips,
394 unsigned num_clips)
395{
396 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
397 struct vmw_framebuffer_surface *vfbs =
398 vmw_framebuffer_to_vfbs(framebuffer);
399 struct vmw_surface *surf = vfbs->surface;
400 struct drm_clip_rect norect;
401 SVGA3dCopyRect *cr;
402 int i, inc = 1;
403
404 struct {
405 SVGA3dCmdHeader header;
406 SVGA3dCmdPresent body;
407 SVGA3dCopyRect cr;
408 } *cmd;
409
410 if (!num_clips ||
411 !(dev_priv->fifo.capabilities &
412 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
413 int ret;
414
415 mutex_lock(&vfbs->work_lock);
416 vfbs->present_fs = true;
417 ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
418 mutex_unlock(&vfbs->work_lock);
419 if (ret) {
420 /**
421			 * No work was pending, so force an immediate present.
422 */
423 vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
424 }
425 return 0;
426 }
427
428 if (!num_clips) {
429 num_clips = 1;
430 clips = &norect;
431 norect.x1 = norect.y1 = 0;
432 norect.x2 = framebuffer->width;
433 norect.y2 = framebuffer->height;
434 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
435 num_clips /= 2;
436 inc = 2; /* skip source rects */
437 }
438
439 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
440 if (unlikely(cmd == NULL)) {
441 DRM_ERROR("Fifo reserve failed.\n");
442 return -ENOMEM;
443 }
444
445 memset(cmd, 0, sizeof(*cmd));
446
447 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
448 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
449 cmd->body.sid = cpu_to_le32(surf->res.id);
450
451 for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
452 cr->x = cpu_to_le16(clips->x1);
453 cr->y = cpu_to_le16(clips->y1);
454 cr->srcx = cr->x;
455 cr->srcy = cr->y;
456 cr->w = cpu_to_le16(clips->x2 - clips->x1);
457 cr->h = cpu_to_le16(clips->y2 - clips->y1);
458 }
459
460 vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
461
462 return 0;
463}
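
This dirty callback (like the dmabuf variant further down) treats DRM_MODE_FB_DIRTY_ANNOTATE_COPY specially: the clip array then arrives as (destination, source) pairs, so the count is halved and the walk steps by two, consuming only the destination rects. A stand-alone model of that walk, with a plain struct standing in for drm_clip_rect:

#include <stdio.h>

struct clip_rect { int x1, y1, x2, y2; };

int main(void)
{
	/* With ANNOTATE_COPY the caller supplies (dst, src) rect pairs. */
	struct clip_rect clips[] = {
		{  0, 0,  64, 64}, {100, 100, 164, 164},
		{ 64, 0, 128, 64}, {200, 100, 264, 164},
	};
	unsigned num_clips = 4, inc = 2, i;
	const struct clip_rect *clip = clips;

	num_clips /= 2;                   /* pairs -> destinations only */
	for (i = 0; i < num_clips; i++, clip += inc)
		printf("present %dx%d at (%d,%d)\n",
		       clip->x2 - clip->x1, clip->y2 - clip->y1,
		       clip->x1, clip->y1);
	return 0;
}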
464
465static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
466 .destroy = vmw_framebuffer_surface_destroy,
467 .dirty = vmw_framebuffer_surface_dirty,
468 .create_handle = vmw_framebuffer_create_handle,
469};
470
471int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
472 struct vmw_surface *surface,
473 struct vmw_framebuffer **out,
474 unsigned width, unsigned height)
475
476{
477 struct drm_device *dev = dev_priv->dev;
478 struct vmw_framebuffer_surface *vfbs;
479 int ret;
480
481 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
482 if (!vfbs) {
483 ret = -ENOMEM;
484 goto out_err1;
485 }
486
487 ret = drm_framebuffer_init(dev, &vfbs->base.base,
488 &vmw_framebuffer_surface_funcs);
489 if (ret)
490 goto out_err2;
491
492 if (!vmw_surface_reference(surface)) {
493		DRM_ERROR("failed to reference surface %p\n", surface);
494		ret = -EINVAL;
495		goto out_err3;
495 }
496
497 /* XXX get the first 3 from the surface info */
498 vfbs->base.base.bits_per_pixel = 32;
499	vfbs->base.base.pitch = width * 32 / 8;
500 vfbs->base.base.depth = 24;
501 vfbs->base.base.width = width;
502 vfbs->base.base.height = height;
503 vfbs->base.pin = NULL;
504 vfbs->base.unpin = NULL;
505 vfbs->surface = surface;
506 mutex_init(&vfbs->work_lock);
507 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
508 *out = &vfbs->base;
509
510 return 0;
511
512out_err3:
513 drm_framebuffer_cleanup(&vfbs->base.base);
514out_err2:
515 kfree(vfbs);
516out_err1:
517 return ret;
518}
519
520/*
521 * Dmabuf framebuffer code
522 */
523
524#define vmw_framebuffer_to_vfbd(x) \
525 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
526
527struct vmw_framebuffer_dmabuf {
528 struct vmw_framebuffer base;
529 struct vmw_dma_buffer *buffer;
530};
531
532void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
533{
534 struct vmw_framebuffer_dmabuf *vfbd =
535 vmw_framebuffer_to_vfbd(framebuffer);
536
537 drm_framebuffer_cleanup(framebuffer);
538 vmw_dmabuf_unreference(&vfbd->buffer);
539
540 kfree(vfbd);
541}
542
543int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
544 unsigned flags, unsigned color,
545 struct drm_clip_rect *clips,
546 unsigned num_clips)
547{
548 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
549 struct drm_clip_rect norect;
550 struct {
551 uint32_t header;
552 SVGAFifoCmdUpdate body;
553 } *cmd;
554 int i, increment = 1;
555
556 if (!num_clips ||
557 !(dev_priv->fifo.capabilities &
558 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
559 num_clips = 1;
560 clips = &norect;
561 norect.x1 = norect.y1 = 0;
562 norect.x2 = framebuffer->width;
563 norect.y2 = framebuffer->height;
564 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
565 num_clips /= 2;
566 increment = 2;
567 }
568
569 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
570 if (unlikely(cmd == NULL)) {
571 DRM_ERROR("Fifo reserve failed.\n");
572 return -ENOMEM;
573 }
574
575	for (i = 0; i < num_clips; i++, clips += increment) {
576		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
577		cmd[i].body.x = cpu_to_le32(clips->x1);
578		cmd[i].body.y = cpu_to_le32(clips->y1);
579		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
580		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
581	}
582
583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
584
585 return 0;
586}
587
588static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
589 .destroy = vmw_framebuffer_dmabuf_destroy,
590 .dirty = vmw_framebuffer_dmabuf_dirty,
591 .create_handle = vmw_framebuffer_create_handle,
592};
593
594static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
595{
596 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
597 struct vmw_framebuffer_dmabuf *vfbd =
598 vmw_framebuffer_to_vfbd(&vfb->base);
599 int ret;
600
601 vmw_overlay_pause_all(dev_priv);
602
603 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
604
605 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
606 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
607 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
608 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
609 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
610 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
611 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
612 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
613 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
614
615 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
616 vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
617 vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
618 vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
619 vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
620 vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
621 vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
622 vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
623 } else
624 WARN_ON(true);
625
626 vmw_overlay_resume_all(dev_priv);
627
628 return 0;
629}
630
631static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
632{
633 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
634 struct vmw_framebuffer_dmabuf *vfbd =
635 vmw_framebuffer_to_vfbd(&vfb->base);
636
637	if (WARN_ON(!vfbd->buffer))
638		return 0;
641
642 return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
643}
644
645int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
646 struct vmw_dma_buffer *dmabuf,
647 struct vmw_framebuffer **out,
648 unsigned width, unsigned height)
649
650{
651 struct drm_device *dev = dev_priv->dev;
652 struct vmw_framebuffer_dmabuf *vfbd;
653 int ret;
654
655 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
656 if (!vfbd) {
657 ret = -ENOMEM;
658 goto out_err1;
659 }
660
661 ret = drm_framebuffer_init(dev, &vfbd->base.base,
662 &vmw_framebuffer_dmabuf_funcs);
663 if (ret)
664 goto out_err2;
665
666 if (!vmw_dmabuf_reference(dmabuf)) {
667		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
668		ret = -EINVAL;
669		goto out_err3;
669 }
670
671 /* XXX get the first 3 from the surface info */
672 vfbd->base.base.bits_per_pixel = 32;
673	vfbd->base.base.pitch = width * 32 / 8;
674 vfbd->base.base.depth = 24;
675 vfbd->base.base.width = width;
676 vfbd->base.base.height = height;
677 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
678 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
679 vfbd->buffer = dmabuf;
680 *out = &vfbd->base;
681
682 return 0;
683
684out_err3:
685 drm_framebuffer_cleanup(&vfbd->base.base);
686out_err2:
687 kfree(vfbd);
688out_err1:
689 return ret;
690}
691
692/*
693 * Generic Kernel modesetting functions
694 */
695
696static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
697 struct drm_file *file_priv,
698 struct drm_mode_fb_cmd *mode_cmd)
699{
700 struct vmw_private *dev_priv = vmw_priv(dev);
701 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
702 struct vmw_framebuffer *vfb = NULL;
703 struct vmw_surface *surface = NULL;
704 struct vmw_dma_buffer *bo = NULL;
705 int ret;
706
707 ret = vmw_user_surface_lookup(dev_priv, tfile,
708 mode_cmd->handle, &surface);
709 if (ret)
710 goto try_dmabuf;
711
712 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
713 mode_cmd->width, mode_cmd->height);
714
715 /* vmw_user_surface_lookup takes one ref so does new_fb */
716 vmw_surface_unreference(&surface);
717
718 if (ret) {
719 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
720 return NULL;
721 }
722 return &vfb->base;
723
724try_dmabuf:
725 DRM_INFO("%s: trying buffer\n", __func__);
726
727 ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
728 if (ret) {
729 DRM_ERROR("failed to find buffer: %i\n", ret);
730 return NULL;
731 }
732
733 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
734 mode_cmd->width, mode_cmd->height);
735
736 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
737 vmw_dmabuf_unreference(&bo);
738
739 if (ret) {
740 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
741 return NULL;
742 }
743
744 return &vfb->base;
745}
746
747static int vmw_kms_fb_changed(struct drm_device *dev)
748{
749 return 0;
750}
751
752static struct drm_mode_config_funcs vmw_kms_funcs = {
753 .fb_create = vmw_kms_fb_create,
754 .fb_changed = vmw_kms_fb_changed,
755};
756
757int vmw_kms_init(struct vmw_private *dev_priv)
758{
759 struct drm_device *dev = dev_priv->dev;
761
762 drm_mode_config_init(dev);
763 dev->mode_config.funcs = &vmw_kms_funcs;
764 dev->mode_config.min_width = 640;
765 dev->mode_config.min_height = 480;
766 dev->mode_config.max_width = 2048;
767 dev->mode_config.max_height = 2048;
768
769	return vmw_kms_init_legacy_display_system(dev_priv);
772}
773
774int vmw_kms_close(struct vmw_private *dev_priv)
775{
776 /*
777	 * Docs say we should take the lock before calling this function,
778	 * but since it destroys encoders, and our destructor calls
779	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
780 */
781 drm_mode_config_cleanup(dev_priv->dev);
782 vmw_kms_close_legacy_display_system(dev_priv);
783 return 0;
784}
785
786int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
787 struct drm_file *file_priv)
788{
789 struct drm_vmw_cursor_bypass_arg *arg = data;
790 struct vmw_display_unit *du;
791 struct drm_mode_object *obj;
792 struct drm_crtc *crtc;
793 int ret = 0;
794
795
796 mutex_lock(&dev->mode_config.mutex);
797 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
798
799 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
800 du = vmw_crtc_to_du(crtc);
801 du->hotspot_x = arg->xhot;
802 du->hotspot_y = arg->yhot;
803 }
804
805 mutex_unlock(&dev->mode_config.mutex);
806 return 0;
807 }
808
809 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
810 if (!obj) {
811 ret = -EINVAL;
812 goto out;
813 }
814
815 crtc = obj_to_crtc(obj);
816 du = vmw_crtc_to_du(crtc);
817
818 du->hotspot_x = arg->xhot;
819 du->hotspot_y = arg->yhot;
820
821out:
822 mutex_unlock(&dev->mode_config.mutex);
823
824 return ret;
825}
826
827int vmw_kms_save_vga(struct vmw_private *vmw_priv)
828{
829 /*
830	 * Set up a single multimon monitor with a size of 0x0.
831	 * This stops the UI from resizing when we change the
832	 * framebuffer size.
833 */
834 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
835 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
836 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
837 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
838 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
839 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
840 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
841 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
842 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
843 }
844
845 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
846 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
847 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
848 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
849 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
850 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
851 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
852 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
853
854 return 0;
855}
856
857int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
858{
859 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
860 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
861 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
862 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
863 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
864 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
865 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
866 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
867
868 /* TODO check for multimon */
869 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
870
871 return 0;
872}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 000000000000..8b95249f0531
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef VMWGFX_KMS_H_
29#define VMWGFX_KMS_H_
30
31#include "drmP.h"
32#include "vmwgfx_drv.h"
33
34
35#define vmw_framebuffer_to_vfb(x) \
36 container_of(x, struct vmw_framebuffer, base)
37
38/**
39 * Base class for framebuffers
40 *
 41	 * @pin is called whenever a crtc uses this framebuffer
 42	 * @unpin is called when no crtc is using the framebuffer any longer
43 */
44struct vmw_framebuffer {
45 struct drm_framebuffer base;
46 int (*pin)(struct vmw_framebuffer *fb);
47 int (*unpin)(struct vmw_framebuffer *fb);
48};
49
50
51#define vmw_crtc_to_du(x) \
52 container_of(x, struct vmw_display_unit, crtc)
53
54/*
55 * Basic cursor manipulation
56 */
57int vmw_cursor_update_image(struct vmw_private *dev_priv,
58 u32 *image, u32 width, u32 height,
59 u32 hotspotX, u32 hotspotY);
60void vmw_cursor_update_position(struct vmw_private *dev_priv,
61 bool show, int x, int y);
62
63/**
64 * Base class display unit.
65 *
 66	 * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
 67	 * the display unit is all of them at the same time. This is true for both
68 * legacy multimon and screen objects.
69 */
70struct vmw_display_unit {
71 struct drm_crtc crtc;
72 struct drm_encoder encoder;
73 struct drm_connector connector;
74
75 struct vmw_surface *cursor_surface;
76 struct vmw_dma_buffer *cursor_dmabuf;
77 size_t cursor_age;
78
79 int cursor_x;
80 int cursor_y;
81
82 int hotspot_x;
83 int hotspot_y;
84
85 unsigned unit;
86};
87
88/*
89 * Shared display unit functions - vmwgfx_kms.c
90 */
91void vmw_display_unit_cleanup(struct vmw_display_unit *du);
92int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
93 uint32_t handle, uint32_t width, uint32_t height);
94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
95
96/*
 97	 * Legacy display unit functions - vmwgfx_ldu.c
98 */
99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
101
102#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 000000000000..90891593bf6c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \
33 container_of(x, struct vmw_legacy_display_unit, base.encoder)
34#define vmw_connector_to_ldu(x) \
35 container_of(x, struct vmw_legacy_display_unit, base.connector)
36
37struct vmw_legacy_display {
38 struct list_head active;
39
40 unsigned num_active;
41
42 struct vmw_framebuffer *fb;
43};
44
45/**
46 * Display unit using the legacy register interface.
47 */
48struct vmw_legacy_display_unit {
49 struct vmw_display_unit base;
50
51 struct list_head active;
52
53 unsigned unit;
54};
55
56static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
57{
58 list_del_init(&ldu->active);
59 vmw_display_unit_cleanup(&ldu->base);
60 kfree(ldu);
61}
62
63
64/*
65 * Legacy Display Unit CRTC functions
66 */
67
68static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
69{
70}
71
72static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
73{
74}
75
76static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
77 u16 *r, u16 *g, u16 *b,
78 uint32_t size)
79{
80}
81
82static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
83{
84 vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
85}
86
87static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
88{
89 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
90 struct vmw_legacy_display_unit *entry;
91 struct drm_crtc *crtc;
92 int i = 0;
93
94 /* to stop the screen from changing size on resize */
95 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
96 for (i = 0; i < lds->num_active; i++) {
97 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
98 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
99 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
100 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
101 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
102 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
103 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
104 }
105
106 /* Now set the mode */
107 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
108 i = 0;
109 list_for_each_entry(entry, &lds->active, active) {
110 crtc = &entry->base.crtc;
111
112 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
113 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
114 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
115 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
116 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
117 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
118 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
119
120 i++;
121 }
122
123 return 0;
124}
125
126static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
127 struct vmw_legacy_display_unit *ldu)
128{
129 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
130 if (list_empty(&ldu->active))
131 return 0;
132
133 list_del_init(&ldu->active);
134 if (--(ld->num_active) == 0) {
135 BUG_ON(!ld->fb);
136 if (ld->fb->unpin)
137 ld->fb->unpin(ld->fb);
138 ld->fb = NULL;
139 }
140
141 return 0;
142}
143
144static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
145 struct vmw_legacy_display_unit *ldu,
146 struct vmw_framebuffer *vfb)
147{
148 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
149 struct vmw_legacy_display_unit *entry;
150 struct list_head *at;
151
152 if (!list_empty(&ldu->active))
153 return 0;
154
155 at = &ld->active;
156 list_for_each_entry(entry, &ld->active, active) {
157 if (entry->unit > ldu->unit)
158 break;
159
160 at = &entry->active;
161 }
162
163 list_add(&ldu->active, at);
164 if (ld->num_active++ == 0) {
165 BUG_ON(ld->fb);
166 if (vfb->pin)
167 vfb->pin(vfb);
168 ld->fb = vfb;
169 }
170
171 return 0;
172}
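
vmw_ldu_add_active() keeps the active list sorted by unit number by remembering the spot after the last entry whose unit does not exceed the new one. The same idea as a stand-alone singly linked list insert (illustration only):

#include <stdio.h>

struct ldu {
	unsigned unit;
	struct ldu *next;
};

/* Insert keeping the list sorted by ascending unit number. */
static void add_active(struct ldu **head, struct ldu *n)
{
	struct ldu **at = head;

	while (*at && (*at)->unit <= n->unit)
		at = &(*at)->next;
	n->next = *at;
	*at = n;
}

int main(void)
{
	struct ldu a = {2, NULL}, b = {0, NULL}, c = {1, NULL};
	struct ldu *head = NULL, *i;

	add_active(&head, &a);
	add_active(&head, &b);
	add_active(&head, &c);
	for (i = head; i; i = i->next)
		printf("unit %u\n", i->unit);   /* 0 1 2 */
	return 0;
}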
173
174static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
175{
176 struct vmw_private *dev_priv;
177 struct vmw_legacy_display_unit *ldu;
178 struct drm_connector *connector;
179 struct drm_display_mode *mode;
180 struct drm_encoder *encoder;
181 struct vmw_framebuffer *vfb;
182 struct drm_framebuffer *fb;
183 struct drm_crtc *crtc;
184
185 if (!set)
186 return -EINVAL;
187
188 if (!set->crtc)
189 return -EINVAL;
190
191 /* get the ldu */
192 crtc = set->crtc;
193 ldu = vmw_crtc_to_ldu(crtc);
194 vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
195 dev_priv = vmw_priv(crtc->dev);
196
197 if (set->num_connectors > 1) {
198		DRM_ERROR("too many connectors\n");
199 return -EINVAL;
200 }
201
202 if (set->num_connectors == 1 &&
203 set->connectors[0] != &ldu->base.connector) {
204 DRM_ERROR("connector doesn't match %p %p\n",
205 set->connectors[0], &ldu->base.connector);
206 return -EINVAL;
207 }
208
209	/* ldu only supports one fb active at a time */
210 if (dev_priv->ldu_priv->fb && vfb &&
211 dev_priv->ldu_priv->fb != vfb) {
212 DRM_ERROR("Multiple framebuffers not supported\n");
213 return -EINVAL;
214 }
215
216 /* since they always map one to one these are safe */
217 connector = &ldu->base.connector;
218 encoder = &ldu->base.encoder;
219
220 /* should we turn the crtc off? */
221 if (set->num_connectors == 0 || !set->mode || !set->fb) {
222
223 connector->encoder = NULL;
224 encoder->crtc = NULL;
225 crtc->fb = NULL;
226
227 vmw_ldu_del_active(dev_priv, ldu);
228
229 vmw_ldu_commit_list(dev_priv);
230
231 return 0;
232 }
233
234
235 /* we now know we want to set a mode */
236 mode = set->mode;
237 fb = set->fb;
238
239 if (set->x + mode->hdisplay > fb->width ||
240 set->y + mode->vdisplay > fb->height) {
241 DRM_ERROR("set outside of framebuffer\n");
242 return -EINVAL;
243 }
244
245 vmw_fb_off(dev_priv);
246
247 crtc->fb = fb;
248 encoder->crtc = crtc;
249 connector->encoder = encoder;
250 crtc->x = set->x;
251 crtc->y = set->y;
252 crtc->mode = *mode;
253
254 vmw_ldu_add_active(dev_priv, ldu, vfb);
255
256 vmw_ldu_commit_list(dev_priv);
257
258 return 0;
259}
260
261static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
262 .save = vmw_ldu_crtc_save,
263 .restore = vmw_ldu_crtc_restore,
264 .cursor_set = vmw_du_crtc_cursor_set,
265 .cursor_move = vmw_du_crtc_cursor_move,
266 .gamma_set = vmw_ldu_crtc_gamma_set,
267 .destroy = vmw_ldu_crtc_destroy,
268 .set_config = vmw_ldu_crtc_set_config,
269};
270
271/*
272 * Legacy Display Unit encoder functions
273 */
274
275static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
276{
277 vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
278}
279
280static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
281 .destroy = vmw_ldu_encoder_destroy,
282};
283
284/*
285 * Legacy Display Unit connector functions
286 */
287
288static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
289{
290}
291
292static void vmw_ldu_connector_save(struct drm_connector *connector)
293{
294}
295
296static void vmw_ldu_connector_restore(struct drm_connector *connector)
297{
298}
299
300static enum drm_connector_status
301 vmw_ldu_connector_detect(struct drm_connector *connector)
302{
303 /* XXX vmwctrl should control connection status */
304 if (vmw_connector_to_ldu(connector)->base.unit == 0)
305 return connector_status_connected;
306 return connector_status_disconnected;
307}
308
309static struct drm_display_mode vmw_ldu_connector_builtin[] = {
310 /* 640x480@60Hz */
311 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
312 752, 800, 0, 480, 489, 492, 525, 0,
313 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
314 /* 800x600@60Hz */
315 { DRM_MODE("800x600",
316 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
317 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
318 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
319 /* 1024x768@60Hz */
320 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
321 1184, 1344, 0, 768, 771, 777, 806, 0,
322 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
323 /* 1152x864@75Hz */
324 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
325 1344, 1600, 0, 864, 865, 868, 900, 0,
326 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
327 /* 1280x768@60Hz */
328 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
329 1472, 1664, 0, 768, 771, 778, 798, 0,
330 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
331 /* 1280x800@60Hz */
332 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
333 1480, 1680, 0, 800, 803, 809, 831, 0,
334 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
335 /* 1280x960@60Hz */
336 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
337 1488, 1800, 0, 960, 961, 964, 1000, 0,
338 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
339 /* 1280x1024@60Hz */
340 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
341 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
342 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
343 /* 1360x768@60Hz */
344 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
345 1536, 1792, 0, 768, 771, 777, 795, 0,
346 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
347	/* 1400x1050@60Hz */
348 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
349 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
350 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
351 /* 1440x900@60Hz */
352 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
353 1672, 1904, 0, 900, 903, 909, 934, 0,
354 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
355 /* 1600x1200@60Hz */
356 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
357 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
358 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
359 /* 1680x1050@60Hz */
360 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
361 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
362 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
363 /* 1792x1344@60Hz */
364 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
365 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
366 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
367	/* 1856x1392@60Hz */
368 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
369 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
370 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
371 /* 1920x1200@60Hz */
372 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
373 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
374 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
375 /* 1920x1440@60Hz */
376 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
377 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
378 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
379 /* 2560x1600@60Hz */
380 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
381 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
382 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
383 /* Terminate */
384 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
385};
386
387static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
388 uint32_t max_width, uint32_t max_height)
389{
390 struct drm_device *dev = connector->dev;
391 struct drm_display_mode *mode = NULL;
392 int i;
393
394 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
395 if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
396 vmw_ldu_connector_builtin[i].vdisplay > max_height)
397 continue;
398
399 mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
400 if (!mode)
401 return 0;
402 mode->vrefresh = drm_mode_vrefresh(mode);
403
404 drm_mode_probed_add(connector, mode);
405 }
406
407 drm_mode_connector_list_update(connector);
408
409 return 1;
410}
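
drm_mode_vrefresh(), used by fill_modes() above, derives the refresh rate from the pixel clock and the total raster size, roughly clock_khz * 1000 / (htotal * vtotal). A quick stand-alone check against the 1024x768 entry of the builtin table (plain arithmetic, not the DRM helper itself):

#include <stdio.h>

int main(void)
{
	/* 1024x768 entry: clock 65000 kHz, htotal 1344, vtotal 806. */
	int clock_khz = 65000, htotal = 1344, vtotal = 806;
	int vrefresh = (clock_khz * 1000 + (htotal * vtotal) / 2) /
		       (htotal * vtotal);   /* rounded to the nearest Hz */

	printf("%d Hz\n", vrefresh);        /* 60 */
	return 0;
}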
411
412static int vmw_ldu_connector_set_property(struct drm_connector *connector,
413 struct drm_property *property,
414 uint64_t val)
415{
416 return 0;
417}
418
419static void vmw_ldu_connector_destroy(struct drm_connector *connector)
420{
421 vmw_ldu_destroy(vmw_connector_to_ldu(connector));
422}
423
424static struct drm_connector_funcs vmw_legacy_connector_funcs = {
425 .dpms = vmw_ldu_connector_dpms,
426 .save = vmw_ldu_connector_save,
427 .restore = vmw_ldu_connector_restore,
428 .detect = vmw_ldu_connector_detect,
429 .fill_modes = vmw_ldu_connector_fill_modes,
430 .set_property = vmw_ldu_connector_set_property,
431 .destroy = vmw_ldu_connector_destroy,
432};
433
434static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
435{
436 struct vmw_legacy_display_unit *ldu;
437 struct drm_device *dev = dev_priv->dev;
438 struct drm_connector *connector;
439 struct drm_encoder *encoder;
440 struct drm_crtc *crtc;
441
442 ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
443 if (!ldu)
444 return -ENOMEM;
445
446 ldu->unit = unit;
447 crtc = &ldu->base.crtc;
448 encoder = &ldu->base.encoder;
449 connector = &ldu->base.connector;
450
451 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
452 DRM_MODE_CONNECTOR_LVDS);
453 /* Initial status */
454 if (unit == 0)
455 connector->status = connector_status_connected;
456 else
457 connector->status = connector_status_disconnected;
458
459 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
460 DRM_MODE_ENCODER_LVDS);
461 drm_mode_connector_attach_encoder(connector, encoder);
462 encoder->possible_crtcs = (1 << unit);
463 encoder->possible_clones = 0;
464
465 INIT_LIST_HEAD(&ldu->active);
466
467 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
468
469 drm_connector_attach_property(connector,
470 dev->mode_config.dirty_info_property,
471 1);
472
473 return 0;
474}
475
476int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
477{
478 if (dev_priv->ldu_priv) {
479 DRM_INFO("ldu system already on\n");
480 return -EINVAL;
481 }
482
483	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
484
485 if (!dev_priv->ldu_priv)
486 return -ENOMEM;
487
488 INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
489 dev_priv->ldu_priv->num_active = 0;
490 dev_priv->ldu_priv->fb = NULL;
491
492 drm_mode_create_dirty_info_property(dev_priv->dev);
493
494 vmw_ldu_init(dev_priv, 0);
495 vmw_ldu_init(dev_priv, 1);
496 vmw_ldu_init(dev_priv, 2);
497 vmw_ldu_init(dev_priv, 3);
498 vmw_ldu_init(dev_priv, 4);
499 vmw_ldu_init(dev_priv, 5);
500 vmw_ldu_init(dev_priv, 6);
501 vmw_ldu_init(dev_priv, 7);
502
503 return 0;
504}
505
506int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
507{
508 if (!dev_priv->ldu_priv)
509 return -ENOSYS;
510
511 BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
512
513 kfree(dev_priv->ldu_priv);
514
515 return 0;
516}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 000000000000..bb6e6a096d25
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,634 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#include "svga_overlay.h"
35#include "svga_escape.h"
36
37#define VMW_MAX_NUM_STREAMS 1
38
39struct vmw_stream {
40 struct vmw_dma_buffer *buf;
41 bool claimed;
42 bool paused;
43 struct drm_vmw_control_stream_arg saved;
44};
45
46/**
47 * Overlay control
48 */
49struct vmw_overlay {
50 /*
51 * Each stream is a single overlay. In Xv these are called ports.
52 */
53 struct mutex mutex;
54 struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
55};
56
57static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
58{
59 struct vmw_private *dev_priv = vmw_priv(dev);
60 return dev_priv ? dev_priv->overlay_priv : NULL;
61}
62
63struct vmw_escape_header {
64 uint32_t cmd;
65 SVGAFifoCmdEscape body;
66};
67
68struct vmw_escape_video_flush {
69 struct vmw_escape_header escape;
70 SVGAEscapeVideoFlush flush;
71};
72
73static inline void fill_escape(struct vmw_escape_header *header,
74 uint32_t size)
75{
76 header->cmd = SVGA_CMD_ESCAPE;
77 header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
78 header->body.size = size;
79}
80
81static inline void fill_flush(struct vmw_escape_video_flush *cmd,
82 uint32_t stream_id)
83{
84 fill_escape(&cmd->escape, sizeof(cmd->flush));
85 cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
86 cmd->flush.streamId = stream_id;
87}
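
fill_escape()/fill_flush() build the two framing layers every overlay control travels in: an SVGA_CMD_ESCAPE header whose size field covers only the nested body, followed by that body. A layout check with plain structs (the command id values are hypothetical placeholders):

#include <stdint.h>
#include <stdio.h>

struct escape_header {
	uint32_t cmd;       /* SVGA_CMD_ESCAPE */
	uint32_t nsid;      /* namespace, e.g. SVGA_ESCAPE_NSID_VMWARE */
	uint32_t size;      /* bytes of nested body that follow */
};

struct video_flush {
	uint32_t cmd_type;  /* SVGA_ESCAPE_VMWARE_VIDEO_FLUSH */
	uint32_t stream_id;
};

struct flush_packet {
	struct escape_header escape;
	struct video_flush flush;
};

int main(void)
{
	struct flush_packet pkt = {
		.escape = { .cmd = 3, .nsid = 0,  /* hypothetical ids */
			    .size = sizeof(pkt.flush) },
		.flush = { .cmd_type = 1, .stream_id = 0 },
	};

	/* The size field covers the body only, not the escape header. */
	printf("packet %zu bytes, body %u bytes\n",
	       sizeof(pkt), (unsigned)pkt.escape.size);
	return 0;
}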
88
89/**
90 * Pin or unpin a buffer in vram.
91 *
92 * @dev_priv: Driver private.
93 * @buf: DMA buffer to pin or unpin.
94 * @pin: Pin buffer in vram if true.
95 * @interruptible: Use interruptible wait.
96 *
 97	 * Takes the current master's ttm lock in read mode.
98 *
99 * Returns
100 * -ERESTARTSYS if interrupted by a signal.
101 */
102static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
103 struct vmw_dma_buffer *buf,
104 bool pin, bool interruptible)
105{
106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement;
109 int ret;
110
111 ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
112 if (unlikely(ret != 0))
113 return ret;
114
115 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
116 if (unlikely(ret != 0))
117 goto err;
118
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124		buf->gmr_bound = false;
125 }
126
127 if (pin)
128 overlay_placement = &vmw_vram_ne_placement;
129
130 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
131
132 ttm_bo_unreserve(bo);
133
134err:
135 ttm_read_unlock(&dev_priv->active_master->lock);
136
137 return ret;
138}
139
140/**
141 * Send put command to hw.
142 *
143 * Returns
144 * -ERESTARTSYS if interrupted by a signal.
145 */
146static int vmw_overlay_send_put(struct vmw_private *dev_priv,
147 struct vmw_dma_buffer *buf,
148 struct drm_vmw_control_stream_arg *arg,
149 bool interruptible)
150{
151 struct {
152 struct vmw_escape_header escape;
153 struct {
154 struct {
155 uint32_t cmdType;
156 uint32_t streamId;
157 } header;
158 struct {
159 uint32_t registerId;
160 uint32_t value;
161 } items[SVGA_VIDEO_PITCH_3 + 1];
162 } body;
163 struct vmw_escape_video_flush flush;
164 } *cmds;
165 uint32_t offset;
166 int i, ret;
167
168 for (;;) {
169 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
170 if (cmds)
171 break;
172
173 ret = vmw_fallback_wait(dev_priv, false, true, 0,
174 interruptible, 3*HZ);
175 if (interruptible && ret == -ERESTARTSYS)
176 return ret;
177 else
178 BUG_ON(ret != 0);
179 }
180
181 fill_escape(&cmds->escape, sizeof(cmds->body));
182 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
183 cmds->body.header.streamId = arg->stream_id;
184
185 for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
186 cmds->body.items[i].registerId = i;
187
188 offset = buf->base.offset + arg->offset;
189
190 cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
191 cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
192 cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
193 cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
194 cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
195 cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
196 cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
197 cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
198 cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
199 cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
200 cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
201 cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
202 cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
203 cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
204 cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
205 cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
206 cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
207 cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
208 cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
209
210 fill_flush(&cmds->flush, arg->stream_id);
211
212 vmw_fifo_commit(dev_priv, sizeof(*cmds));
213
214 return 0;
215}
216
217/**
218 * Send stop command to hw.
219 *
220 * Returns
221 * -ERESTARTSYS if interrupted by a signal.
222 */
223static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
224 uint32_t stream_id,
225 bool interruptible)
226{
227 struct {
228 struct vmw_escape_header escape;
229 SVGAEscapeVideoSetRegs body;
230 struct vmw_escape_video_flush flush;
231 } *cmds;
232 int ret;
233
234 for (;;) {
235 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
236 if (cmds)
237 break;
238
239 ret = vmw_fallback_wait(dev_priv, false, true, 0,
240 interruptible, 3*HZ);
241 if (interruptible && ret == -ERESTARTSYS)
242 return ret;
243 else
244 BUG_ON(ret != 0);
245 }
246
247 fill_escape(&cmds->escape, sizeof(cmds->body));
248 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
249 cmds->body.header.streamId = stream_id;
250 cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
251 cmds->body.items[0].value = false;
252 fill_flush(&cmds->flush, stream_id);
253
254 vmw_fifo_commit(dev_priv, sizeof(*cmds));
255
256 return 0;
257}
258
259/**
260 * Stop or pause a stream.
261 *
262 * If the stream is paused the no evict flag is removed from the buffer
263 * but left in vram. This allows for instance mode_set to evict it
264 * should it need to.
265 *
266 * The caller must hold the overlay lock.
267 *
268 * @stream_id which stream to stop/pause.
269 * @pause true to pause, false to stop completely.
270 */
271static int vmw_overlay_stop(struct vmw_private *dev_priv,
272 uint32_t stream_id, bool pause,
273 bool interruptible)
274{
275 struct vmw_overlay *overlay = dev_priv->overlay_priv;
276 struct vmw_stream *stream = &overlay->stream[stream_id];
277 int ret;
278
279 /* With no buffer attached, the stream is already completely stopped */
280 if (!stream->buf)
281 return 0;
282
283 /* If the stream is paused this is already done */
284 if (!stream->paused) {
285 ret = vmw_overlay_send_stop(dev_priv, stream_id,
286 interruptible);
287 if (ret)
288 return ret;
289
290 /* We just remove the NO_EVICT flag so no -ENOMEM */
291 ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
292 interruptible);
293 if (interruptible && ret == -ERESTARTSYS)
294 return ret;
295 else
296 BUG_ON(ret != 0);
297 }
298
299 if (!pause) {
300 vmw_dmabuf_unreference(&stream->buf);
301 stream->paused = false;
302 } else {
303 stream->paused = true;
304 }
305
306 return 0;
307}
308
309/**
310 * Update a stream and send any put or stop fifo commands needed.
311 *
312 * The caller must hold the overlay lock.
313 *
314 * Returns
315 * -ENOMEM if buffer doesn't fit in vram.
316 * -ERESTARTSYS if interrupted.
317 */
318static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
319 struct vmw_dma_buffer *buf,
320 struct drm_vmw_control_stream_arg *arg,
321 bool interruptible)
322{
323 struct vmw_overlay *overlay = dev_priv->overlay_priv;
324 struct vmw_stream *stream = &overlay->stream[arg->stream_id];
325 int ret = 0;
326
327 if (!buf)
328 return -EINVAL;
329
330 DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
331 stream->buf, buf, stream->paused ? "" : "not ");
332
333 if (stream->buf != buf) {
334 ret = vmw_overlay_stop(dev_priv, arg->stream_id,
335 false, interruptible);
336 if (ret)
337 return ret;
338 } else if (!stream->paused) {
339 /* If the buffers match and we are not paused, just send
340 * the put command, no need to do anything else.
341 */
342 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
343 if (ret == 0)
344 stream->saved = *arg;
345 else
346 BUG_ON(!interruptible);
347
348 return ret;
349 }
350
351 /* We don't start the old stream if we are interrupted.
352 * Might return -ENOMEM if it can't fit the buffer in vram.
353 */
354 ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
355 if (ret)
356 return ret;
357
358 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
359 if (ret) {
360 /* This one needs to happen no matter what. We only remove
361 * the NO_EVICT flag so this is safe from -ENOMEM.
362 */
363 BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
364 return ret;
365 }
366
367 if (stream->buf != buf)
368 stream->buf = vmw_dmabuf_reference(buf);
369 stream->saved = *arg;
370
371 return 0;
372}
373
374/**
375 * Stop all streams.
376 *
377 * Used by the fb code when starting.
378 *
379 * Takes the overlay lock.
380 */
381int vmw_overlay_stop_all(struct vmw_private *dev_priv)
382{
383 struct vmw_overlay *overlay = dev_priv->overlay_priv;
384 int i, ret;
385
386 if (!overlay)
387 return 0;
388
389 mutex_lock(&overlay->mutex);
390
391 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
392 struct vmw_stream *stream = &overlay->stream[i];
393 if (!stream->buf)
394 continue;
395
396 ret = vmw_overlay_stop(dev_priv, i, false, false);
397 WARN_ON(ret != 0);
398 }
399
400 mutex_unlock(&overlay->mutex);
401
402 return 0;
403}
404
405/**
406 * Try to resume all paused streams.
407 *
408 * Used by the kms code after moving a new scanout buffer to vram.
409 *
410 * Takes the overlay lock.
411 */
412int vmw_overlay_resume_all(struct vmw_private *dev_priv)
413{
414 struct vmw_overlay *overlay = dev_priv->overlay_priv;
415 int i, ret;
416
417 if (!overlay)
418 return 0;
419
420 mutex_lock(&overlay->mutex);
421
422 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
423 struct vmw_stream *stream = &overlay->stream[i];
424 if (!stream->paused)
425 continue;
426
427 ret = vmw_overlay_update_stream(dev_priv, stream->buf,
428 &stream->saved, false);
429 if (ret != 0)
430 DRM_INFO("%s: *warning* failed to resume stream %i\n",
431 __func__, i);
432 }
433
434 mutex_unlock(&overlay->mutex);
435
436 return 0;
437}
438
439/**
440 * Pauses all active streams.
441 *
442 * Used by the kms code when moving a new scanout buffer to vram.
443 *
444 * Takes the overlay lock.
445 */
446int vmw_overlay_pause_all(struct vmw_private *dev_priv)
447{
448 struct vmw_overlay *overlay = dev_priv->overlay_priv;
449 int i, ret;
450
451 if (!overlay)
452 return 0;
453
454 mutex_lock(&overlay->mutex);
455
456 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
457 if (overlay->stream[i].paused)
458 DRM_INFO("%s: *warning* stream %i already paused\n",
459 __func__, i);
460 ret = vmw_overlay_stop(dev_priv, i, true, false);
461 WARN_ON(ret != 0);
462 }
463
464 mutex_unlock(&overlay->mutex);
465
466 return 0;
467}
468
469int vmw_overlay_ioctl(struct drm_device *dev, void *data,
470 struct drm_file *file_priv)
471{
472 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
473 struct vmw_private *dev_priv = vmw_priv(dev);
474 struct vmw_overlay *overlay = dev_priv->overlay_priv;
475 struct drm_vmw_control_stream_arg *arg =
476 (struct drm_vmw_control_stream_arg *)data;
477 struct vmw_dma_buffer *buf;
478 struct vmw_resource *res;
479 int ret;
480
481 if (!overlay)
482 return -ENOSYS;
483
484 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
485 if (ret)
486 return ret;
487
488 mutex_lock(&overlay->mutex);
489
490 if (!arg->enabled) {
491 ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
492 goto out_unlock;
493 }
494
495 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
496 if (ret)
497 goto out_unlock;
498
499 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
500
501 vmw_dmabuf_unreference(&buf);
502
503out_unlock:
504 mutex_unlock(&overlay->mutex);
505 vmw_resource_unreference(&res);
506
507 return ret;
508}
509
510int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
511{
512 if (!dev_priv->overlay_priv)
513 return 0;
514
515 return VMW_MAX_NUM_STREAMS;
516}
517
518int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
519{
520 struct vmw_overlay *overlay = dev_priv->overlay_priv;
521 int i, k;
522
523 if (!overlay)
524 return 0;
525
526 mutex_lock(&overlay->mutex);
527
528 for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
529 if (!overlay->stream[i].claimed)
530 k++;
531
532 mutex_unlock(&overlay->mutex);
533
534 return k;
535}
536
537int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
538{
539 struct vmw_overlay *overlay = dev_priv->overlay_priv;
540 int i;
541
542 if (!overlay)
543 return -ENOSYS;
544
545 mutex_lock(&overlay->mutex);
546
547 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
548
549 if (overlay->stream[i].claimed)
550 continue;
551
552 overlay->stream[i].claimed = true;
553 *out = i;
554 mutex_unlock(&overlay->mutex);
555 return 0;
556 }
557
558 mutex_unlock(&overlay->mutex);
559 return -ESRCH;
560}
561
562int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
563{
564 struct vmw_overlay *overlay = dev_priv->overlay_priv;
565
566 BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
567
568 if (!overlay)
569 return -ENOSYS;
570
571 mutex_lock(&overlay->mutex);
572
573 WARN_ON(!overlay->stream[stream_id].claimed);
574 vmw_overlay_stop(dev_priv, stream_id, false, false);
575 overlay->stream[stream_id].claimed = false;
576
577 mutex_unlock(&overlay->mutex);
578 return 0;
579}
580
581int vmw_overlay_init(struct vmw_private *dev_priv)
582{
583 struct vmw_overlay *overlay;
584 int i;
585
586 if (dev_priv->overlay_priv)
587 return -EINVAL;
588
589 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
590 (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
591 DRM_INFO("hardware doesn't support overlays\n");
592 return -ENOSYS;
593 }
594
595 overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
596 if (!overlay)
597 return -ENOMEM;
598
599 memset(overlay, 0, sizeof(*overlay));
600 mutex_init(&overlay->mutex);
601 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
602 overlay->stream[i].buf = NULL;
603 overlay->stream[i].paused = false;
604 overlay->stream[i].claimed = false;
605 }
606
607 dev_priv->overlay_priv = overlay;
608
609 return 0;
610}
611
612int vmw_overlay_close(struct vmw_private *dev_priv)
613{
614 struct vmw_overlay *overlay = dev_priv->overlay_priv;
615 bool forgotten_buffer = false;
616 int i;
617
618 if (!overlay)
619 return -ENOSYS;
620
621 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
622 if (overlay->stream[i].buf) {
623 forgotten_buffer = true;
624 vmw_overlay_stop(dev_priv, i, false, false);
625 }
626 }
627
628 WARN_ON(forgotten_buffer);
629
630 dev_priv->overlay_priv = NULL;
631 kfree(overlay);
632
633 return 0;
634}
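
A minimal sketch of how a user-space client might drive the overlay ioctls implemented above; illustrative only, not part of the patch. It assumes libdrm's drmCommandWriteRead() helper, an already-open vmwgfx DRM fd, and the vmwgfx_drm.h definitions added later in this series. The name show_overlay() is invented and error handling is abbreviated.

/* Hypothetical user-space sketch: claim an overlay stream, enable it on an
 * existing dmabuf handle holding YUV data, then release the stream. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int show_overlay(int fd, uint32_t buf_handle)
{
	struct drm_vmw_stream_arg sarg;
	struct drm_vmw_control_stream_arg carg;
	int ret;

	memset(&sarg, 0, sizeof(sarg));
	ret = drmCommandWriteRead(fd, DRM_VMW_CLAIM_STREAM,
				  &sarg, sizeof(sarg));
	if (ret != 0)
		return ret;

	memset(&carg, 0, sizeof(carg));
	carg.stream_id = sarg.stream_id;
	carg.enabled = 1;
	carg.handle = buf_handle;
	/* ...fill format, size, width/height, pitch[], src and dst rects... */
	ret = drmCommandWriteRead(fd, DRM_VMW_CONTROL_STREAM,
				  &carg, sizeof(carg));

	(void)drmCommandWriteRead(fd, DRM_VMW_UNREF_STREAM,
				  &sarg, sizeof(sarg));
	return ret;
}
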
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 000000000000..9d0dd3a342eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * This file contains virtual hardware defines for kernel space.
30 */
31
32#ifndef _VMWGFX_REG_H_
33#define _VMWGFX_REG_H_
34
35#include <linux/types.h>
36
37#define VMWGFX_INDEX_PORT 0x0
38#define VMWGFX_VALUE_PORT 0x1
39#define VMWGFX_IRQSTATUS_PORT 0x8
40
41struct svga_guest_mem_descriptor {
42 __le32 ppn;
43 __le32 num_pages;
44};
45
46struct svga_fifo_cmd_fence {
47 __le32 fence;
48};
49
50#define SVGA_SYNC_GENERIC 1
51#define SVGA_SYNC_FIFOFULL 2
52
53#include "svga_types.h"
54
55#include "svga3d_reg.h"
56
57#endif
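
The index/value port offsets defined above form a classic indexed register interface: write the register number to the index port, then access the value port. A sketch under that assumption, mirroring the vmw_read()/vmw_write() helpers the driver keeps in vmwgfx_drv.h; io_start stands for the device's PCI I/O base:

/* Illustrative SVGA register access through the index/value port pair. */
#include <linux/types.h>
#include <asm/io.h>

static inline uint32_t svga_reg_read(unsigned long io_start, uint32_t reg)
{
	outl(reg, io_start + VMWGFX_INDEX_PORT);
	return inl(io_start + VMWGFX_VALUE_PORT);
}

static inline void svga_reg_write(unsigned long io_start, uint32_t reg,
				  uint32_t value)
{
	outl(reg, io_start + VMWGFX_INDEX_PORT);
	outl(value, io_start + VMWGFX_VALUE_PORT);
}
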
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 000000000000..a1ceed0c8e07
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1192 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30#include "ttm/ttm_object.h"
31#include "ttm/ttm_placement.h"
32#include "drmP.h"
33
34#define VMW_RES_CONTEXT ttm_driver_type0
35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2
37
38struct vmw_user_context {
39 struct ttm_base_object base;
40 struct vmw_resource res;
41};
42
43struct vmw_user_surface {
44 struct ttm_base_object base;
45 struct vmw_surface srf;
46};
47
48struct vmw_user_dma_buffer {
49 struct ttm_base_object base;
50 struct vmw_dma_buffer dma;
51};
52
53struct vmw_bo_user_rep {
54 uint32_t handle;
55 uint64_t map_handle;
56};
57
58struct vmw_stream {
59 struct vmw_resource res;
60 uint32_t stream_id;
61};
62
63struct vmw_user_stream {
64 struct ttm_base_object base;
65 struct vmw_stream stream;
66};
67
68static inline struct vmw_dma_buffer *
69vmw_dma_buffer(struct ttm_buffer_object *bo)
70{
71 return container_of(bo, struct vmw_dma_buffer, base);
72}
73
74static inline struct vmw_user_dma_buffer *
75vmw_user_dma_buffer(struct ttm_buffer_object *bo)
76{
77 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
78 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
79}
80
81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
82{
83 kref_get(&res->kref);
84 return res;
85}
86
87static void vmw_resource_release(struct kref *kref)
88{
89 struct vmw_resource *res =
90 container_of(kref, struct vmw_resource, kref);
91 struct vmw_private *dev_priv = res->dev_priv;
92
93 idr_remove(res->idr, res->id);
94 write_unlock(&dev_priv->resource_lock);
95
96 if (likely(res->hw_destroy != NULL))
97 res->hw_destroy(res);
98
99 if (res->res_free != NULL)
100 res->res_free(res);
101 else
102 kfree(res);
103
104 write_lock(&dev_priv->resource_lock);
105}
106
107void vmw_resource_unreference(struct vmw_resource **p_res)
108{
109 struct vmw_resource *res = *p_res;
110 struct vmw_private *dev_priv = res->dev_priv;
111
112 *p_res = NULL;
113 write_lock(&dev_priv->resource_lock);
114 kref_put(&res->kref, vmw_resource_release);
115 write_unlock(&dev_priv->resource_lock);
116}
117
118static int vmw_resource_init(struct vmw_private *dev_priv,
119 struct vmw_resource *res,
120 struct idr *idr,
121 enum ttm_object_type obj_type,
122 void (*res_free) (struct vmw_resource *res))
123{
124 int ret;
125
126 kref_init(&res->kref);
127 res->hw_destroy = NULL;
128 res->res_free = res_free;
129 res->res_type = obj_type;
130 res->idr = idr;
131 res->avail = false;
132 res->dev_priv = dev_priv;
133
134 do {
135 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
136 return -ENOMEM;
137
138 write_lock(&dev_priv->resource_lock);
139 ret = idr_get_new_above(idr, res, 1, &res->id);
140 write_unlock(&dev_priv->resource_lock);
141
142 } while (ret == -EAGAIN);
143
144 return ret;
145}
146
147/**
148 * vmw_resource_activate
149 *
150 * @res: Pointer to the newly created resource
151 * @hw_destroy: Destroy function. NULL if none.
152 *
153 * Activate a resource after the hardware has been made aware of it.
154 * Set the destroy function to @hw_destroy. Typically this frees the
155 * resource and destroys the hardware resources associated with it.
156 * Activating a resource means that the function vmw_resource_lookup
157 * will find it.
158 */
159
160static void vmw_resource_activate(struct vmw_resource *res,
161 void (*hw_destroy) (struct vmw_resource *))
162{
163 struct vmw_private *dev_priv = res->dev_priv;
164
165 write_lock(&dev_priv->resource_lock);
166 res->avail = true;
167 res->hw_destroy = hw_destroy;
168 write_unlock(&dev_priv->resource_lock);
169}
170
171struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
172 struct idr *idr, int id)
173{
174 struct vmw_resource *res;
175
176 read_lock(&dev_priv->resource_lock);
177 res = idr_find(idr, id);
178 if (res && res->avail)
179 kref_get(&res->kref);
180 else
181 res = NULL;
182 read_unlock(&dev_priv->resource_lock);
183
184 if (unlikely(res == NULL))
185 return NULL;
186
187 return res;
188}
189
190/**
191 * Context management:
192 */
193
194static void vmw_hw_context_destroy(struct vmw_resource *res)
195{
196
197 struct vmw_private *dev_priv = res->dev_priv;
198 struct {
199 SVGA3dCmdHeader header;
200 SVGA3dCmdDestroyContext body;
201 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
202
203 if (unlikely(cmd == NULL)) {
204 DRM_ERROR("Failed reserving FIFO space for context "
205 "destruction.\n");
206 return;
207 }
208
209 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
210 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
211 cmd->body.cid = cpu_to_le32(res->id);
212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214}
215
216static int vmw_context_init(struct vmw_private *dev_priv,
217 struct vmw_resource *res,
218 void (*res_free) (struct vmw_resource *res))
219{
220 int ret;
221
222 struct {
223 SVGA3dCmdHeader header;
224 SVGA3dCmdDefineContext body;
225 } *cmd;
226
227 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
228 VMW_RES_CONTEXT, res_free);
229
230 if (unlikely(ret != 0)) {
231 if (res_free == NULL)
232 kfree(res);
233 else
234 res_free(res);
235 return ret;
236 }
237
238 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
239 if (unlikely(cmd == NULL)) {
240 DRM_ERROR("Fifo reserve failed.\n");
241 vmw_resource_unreference(&res);
242 return -ENOMEM;
243 }
244
245 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
246 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
247 cmd->body.cid = cpu_to_le32(res->id);
248
249 vmw_fifo_commit(dev_priv, sizeof(*cmd));
250 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0;
252}
253
254struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
255{
256 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
257 int ret;
258
259 if (unlikely(res == NULL))
260 return NULL;
261
262 ret = vmw_context_init(dev_priv, res, NULL);
263 return (ret == 0) ? res : NULL;
264}
265
266/**
267 * User-space context management:
268 */
269
270static void vmw_user_context_free(struct vmw_resource *res)
271{
272 struct vmw_user_context *ctx =
273 container_of(res, struct vmw_user_context, res);
274
275 kfree(ctx);
276}
277
278/**
279 * This function is called when user space has no more references on the
280 * base object. It releases the base-object's reference on the resource object.
281 */
282
283static void vmw_user_context_base_release(struct ttm_base_object **p_base)
284{
285 struct ttm_base_object *base = *p_base;
286 struct vmw_user_context *ctx =
287 container_of(base, struct vmw_user_context, base);
288 struct vmw_resource *res = &ctx->res;
289
290 *p_base = NULL;
291 vmw_resource_unreference(&res);
292}
293
294int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
295 struct drm_file *file_priv)
296{
297 struct vmw_private *dev_priv = vmw_priv(dev);
298 struct vmw_resource *res;
299 struct vmw_user_context *ctx;
300 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
301 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
302 int ret = 0;
303
304 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
305 if (unlikely(res == NULL))
306 return -EINVAL;
307
308 if (res->res_free != &vmw_user_context_free) {
309 ret = -EINVAL;
310 goto out;
311 }
312
313 ctx = container_of(res, struct vmw_user_context, res);
314 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
315 ret = -EPERM;
316 goto out;
317 }
318
319 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
320out:
321 vmw_resource_unreference(&res);
322 return ret;
323}
324
325int vmw_context_define_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv)
327{
328 struct vmw_private *dev_priv = vmw_priv(dev);
329 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
330 struct vmw_resource *res;
331 struct vmw_resource *tmp;
332 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
333 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
334 int ret;
335
336 if (unlikely(ctx == NULL))
337 return -ENOMEM;
338
339 res = &ctx->res;
340 ctx->base.shareable = false;
341 ctx->base.tfile = NULL;
342
343 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
344 if (unlikely(ret != 0))
345 return ret;
346
347 tmp = vmw_resource_reference(&ctx->res);
348 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
349 &vmw_user_context_base_release, NULL);
350
351 if (unlikely(ret != 0)) {
352 vmw_resource_unreference(&tmp);
353 goto out_err;
354 }
355
356 arg->cid = res->id;
357out_err:
358 vmw_resource_unreference(&res);
359 return ret;
360
361}
362
363int vmw_context_check(struct vmw_private *dev_priv,
364 struct ttm_object_file *tfile,
365 int id)
366{
367 struct vmw_resource *res;
368 int ret = 0;
369
370 read_lock(&dev_priv->resource_lock);
371 res = idr_find(&dev_priv->context_idr, id);
372 if (res && res->avail) {
373 struct vmw_user_context *ctx =
374 container_of(res, struct vmw_user_context, res);
375 if (ctx->base.tfile != tfile && !ctx->base.shareable)
376 ret = -EPERM;
377 } else
378 ret = -EINVAL;
379 read_unlock(&dev_priv->resource_lock);
380
381 return ret;
382}
383
384
385/**
386 * Surface management.
387 */
388
389static void vmw_hw_surface_destroy(struct vmw_resource *res)
390{
391
392 struct vmw_private *dev_priv = res->dev_priv;
393 struct {
394 SVGA3dCmdHeader header;
395 SVGA3dCmdDestroySurface body;
396 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
397
398 if (unlikely(cmd == NULL)) {
399 DRM_ERROR("Failed reserving FIFO space for surface "
400 "destruction.\n");
401 return;
402 }
403
404 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
405 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
406 cmd->body.sid = cpu_to_le32(res->id);
407
408 vmw_fifo_commit(dev_priv, sizeof(*cmd));
409}
410
411void vmw_surface_res_free(struct vmw_resource *res)
412{
413 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
414
415 kfree(srf->sizes);
416 kfree(srf->snooper.image);
417 kfree(srf);
418}
419
420int vmw_surface_init(struct vmw_private *dev_priv,
421 struct vmw_surface *srf,
422 void (*res_free) (struct vmw_resource *res))
423{
424 int ret;
425 struct {
426 SVGA3dCmdHeader header;
427 SVGA3dCmdDefineSurface body;
428 } *cmd;
429 SVGA3dSize *cmd_size;
430 struct vmw_resource *res = &srf->res;
431 struct drm_vmw_size *src_size;
432 size_t submit_size;
433 uint32_t cmd_len;
434 int i;
435
436 BUG_ON(res_free == NULL);
437 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
438 VMW_RES_SURFACE, res_free);
439
440 if (unlikely(ret != 0)) {
441 res_free(res);
442 return ret;
443 }
444
445 submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
446 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
447
448 cmd = vmw_fifo_reserve(dev_priv, submit_size);
449 if (unlikely(cmd == NULL)) {
450 DRM_ERROR("Fifo reserve failed for create surface.\n");
451 vmw_resource_unreference(&res);
452 return -ENOMEM;
453 }
454
455 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
456 cmd->header.size = cpu_to_le32(cmd_len);
457 cmd->body.sid = cpu_to_le32(res->id);
458 cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
459 cmd->body.format = cpu_to_le32(srf->format);
460 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
461 cmd->body.face[i].numMipLevels =
462 cpu_to_le32(srf->mip_levels[i]);
463 }
464
465 cmd += 1;
466 cmd_size = (SVGA3dSize *) cmd;
467 src_size = srf->sizes;
468
469 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
470 cmd_size->width = cpu_to_le32(src_size->width);
471 cmd_size->height = cpu_to_le32(src_size->height);
472 cmd_size->depth = cpu_to_le32(src_size->depth);
473 }
474
475 vmw_fifo_commit(dev_priv, submit_size);
476 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0;
478}
479
480static void vmw_user_surface_free(struct vmw_resource *res)
481{
482 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
483 struct vmw_user_surface *user_srf =
484 container_of(srf, struct vmw_user_surface, srf);
485
486 kfree(srf->sizes);
487 kfree(srf->snooper.image);
488 kfree(user_srf);
489}
490
491int vmw_user_surface_lookup(struct vmw_private *dev_priv,
492 struct ttm_object_file *tfile,
493 int sid, struct vmw_surface **out)
494{
495 struct vmw_resource *res;
496 struct vmw_surface *srf;
497 struct vmw_user_surface *user_srf;
498
499 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
500 if (unlikely(res == NULL))
501 return -EINVAL;
502
503 if (res->res_free != &vmw_user_surface_free)
504 return -EINVAL;
505
506 srf = container_of(res, struct vmw_surface, res);
507 user_srf = container_of(srf, struct vmw_user_surface, srf);
508 if (user_srf->base.tfile != tfile && !user_srf->base.shareable)
509 return -EPERM;
510
511 *out = srf;
512 return 0;
513}
514
515static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
516{
517 struct ttm_base_object *base = *p_base;
518 struct vmw_user_surface *user_srf =
519 container_of(base, struct vmw_user_surface, base);
520 struct vmw_resource *res = &user_srf->srf.res;
521
522 *p_base = NULL;
523 vmw_resource_unreference(&res);
524}
525
526int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
527 struct drm_file *file_priv)
528{
529 struct vmw_private *dev_priv = vmw_priv(dev);
530 struct vmw_resource *res;
531 struct vmw_surface *srf;
532 struct vmw_user_surface *user_srf;
533 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
534 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
535 int ret = 0;
536
537 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
538 if (unlikely(res == NULL))
539 return -EINVAL;
540
541 if (res->res_free != &vmw_user_surface_free) {
542 ret = -EINVAL;
543 goto out;
544 }
545
546 srf = container_of(res, struct vmw_surface, res);
547 user_srf = container_of(srf, struct vmw_user_surface, srf);
548 if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
549 ret = -EPERM;
550 goto out;
551 }
552
553 ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
554 TTM_REF_USAGE);
555out:
556 vmw_resource_unreference(&res);
557 return ret;
558}
559
560int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
561 struct drm_file *file_priv)
562{
563 struct vmw_private *dev_priv = vmw_priv(dev);
564 struct vmw_user_surface *user_srf =
565 kmalloc(sizeof(*user_srf), GFP_KERNEL);
566 struct vmw_surface *srf;
567 struct vmw_resource *res;
568 struct vmw_resource *tmp;
569 union drm_vmw_surface_create_arg *arg =
570 (union drm_vmw_surface_create_arg *)data;
571 struct drm_vmw_surface_create_req *req = &arg->req;
572 struct drm_vmw_surface_arg *rep = &arg->rep;
573 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
574 struct drm_vmw_size __user *user_sizes;
575 int ret;
576 int i;
577
578 if (unlikely(user_srf == NULL))
579 return -ENOMEM;
580
581 srf = &user_srf->srf;
582 res = &srf->res;
583
584 srf->flags = req->flags;
585 srf->format = req->format;
586 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
587 srf->num_sizes = 0;
588 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
589 srf->num_sizes += srf->mip_levels[i];
590
591 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
592 DRM_VMW_MAX_MIP_LEVELS) {
593 ret = -EINVAL;
594 goto out_err0;
595 }
596
597 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
598 if (unlikely(srf->sizes == NULL)) {
599 ret = -ENOMEM;
600 goto out_err0;
601 }
602
603 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
604 req->size_addr;
605
606 if (copy_from_user(srf->sizes, user_sizes,
607 srf->num_sizes * sizeof(*srf->sizes))) {
608 ret = -EFAULT; /* bytes-not-copied is not an errno */
609 goto out_err1;
610 }
611 user_srf->base.shareable = false;
612 user_srf->base.tfile = NULL;
613
614 /**
615 * From this point, the generic resource management functions
616 * destroy the object on failure.
617 */
618
619 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
620 if (unlikely(ret != 0))
621 return ret;
622
623 tmp = vmw_resource_reference(&srf->res);
624 ret = ttm_base_object_init(tfile, &user_srf->base,
625 req->shareable, VMW_RES_SURFACE,
626 &vmw_user_surface_base_release, NULL);
627
628 if (unlikely(ret != 0)) {
629 vmw_resource_unreference(&tmp);
630 vmw_resource_unreference(&res);
631 return ret;
632 }
633
634 if (srf->flags & (1 << 9) && /* hint bit: candidate cursor surface */
635 srf->num_sizes == 1 &&
636 srf->sizes[0].width == 64 &&
637 srf->sizes[0].height == 64 &&
638 srf->format == SVGA3D_A8R8G8B8) {
639
640 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
641 /* clear the image */
642 if (srf->snooper.image)
643 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
644 else
645 DRM_ERROR("Failed to allocate cursor_image\n");
646
647 } else {
648 srf->snooper.image = NULL;
649 }
650 srf->snooper.crtc = NULL;
651
652 rep->sid = res->id;
653 vmw_resource_unreference(&res);
654 return 0;
655out_err1:
656 kfree(srf->sizes);
657out_err0:
658 kfree(user_srf);
659 return ret;
660}
661
662int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
663 struct drm_file *file_priv)
664{
665 struct vmw_private *dev_priv = vmw_priv(dev);
666 union drm_vmw_surface_reference_arg *arg =
667 (union drm_vmw_surface_reference_arg *)data;
668 struct drm_vmw_surface_arg *req = &arg->req;
669 struct drm_vmw_surface_create_req *rep = &arg->rep;
670 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
671 struct vmw_resource *res;
672 struct vmw_surface *srf;
673 struct vmw_user_surface *user_srf;
674 struct drm_vmw_size __user *user_sizes;
675 int ret;
676
677 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
678 if (unlikely(res == NULL))
679 return -EINVAL;
680
681 if (res->res_free != &vmw_user_surface_free) {
682 ret = -EINVAL;
683 goto out;
684 }
685
686 srf = container_of(res, struct vmw_surface, res);
687 user_srf = container_of(srf, struct vmw_user_surface, srf);
688 if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
689 DRM_ERROR("Tried to reference a non-shareable surface\n");
690 ret = -EPERM;
691 goto out;
692 }
693
694 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
695 if (unlikely(ret != 0)) {
696 DRM_ERROR("Could not add a reference to a surface.\n");
697 goto out;
698 }
699
700 rep->flags = srf->flags;
701 rep->format = srf->format;
702 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
703 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
704 rep->size_addr;
705
706 if (user_sizes)
707 ret = copy_to_user(user_sizes, srf->sizes,
708 srf->num_sizes * sizeof(*srf->sizes));
709 if (unlikely(ret != 0)) {
710 ret = -EFAULT; /* bytes-not-copied is not an errno */
711 DRM_ERROR("copy_to_user failed %p %u\n", user_sizes, srf->num_sizes);
712 /**
713 * FIXME: Unreference surface here?
714 */
715 goto out;
716 }
717out:
718 vmw_resource_unreference(&res);
719 return ret;
720}
721
722int vmw_surface_check(struct vmw_private *dev_priv,
723 struct ttm_object_file *tfile,
724 int id)
725{
726 struct vmw_resource *res;
727 int ret = 0;
728
729 read_lock(&dev_priv->resource_lock);
730 res = idr_find(&dev_priv->surface_idr, id);
731 if (res && res->avail) {
732 struct vmw_surface *srf =
733 container_of(res, struct vmw_surface, res);
734 struct vmw_user_surface *usrf =
735 container_of(srf, struct vmw_user_surface, srf);
736
737 if (usrf->base.tfile != tfile && !usrf->base.shareable)
738 ret = -EPERM;
739 } else
740 ret = -EINVAL;
741 read_unlock(&dev_priv->resource_lock);
742
743 return ret;
744}
745
746/**
747 * Buffer management.
748 */
749
750static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
751 unsigned long num_pages)
752{
753 static size_t bo_user_size = ~0;
754
755 size_t page_array_size =
756 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
757
758 if (unlikely(bo_user_size == ~0)) {
759 bo_user_size = glob->ttm_bo_extra_size +
760 ttm_round_pot(sizeof(struct vmw_dma_buffer));
761 }
762
763 return bo_user_size + page_array_size;
764}
765
766void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
767{
768 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
769 struct ttm_bo_global *glob = bo->glob;
770 struct vmw_private *dev_priv =
771 container_of(bo->bdev, struct vmw_private, bdev);
772
773 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
774 if (vmw_bo->gmr_bound) {
775 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
776 spin_lock(&glob->lru_lock);
777 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
778 spin_unlock(&glob->lru_lock);
779 }
780 kfree(vmw_bo);
781}
782
783int vmw_dmabuf_init(struct vmw_private *dev_priv,
784 struct vmw_dma_buffer *vmw_bo,
785 size_t size, struct ttm_placement *placement,
786 bool interruptible,
787 void (*bo_free) (struct ttm_buffer_object *bo))
788{
789 struct ttm_bo_device *bdev = &dev_priv->bdev;
790 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
791 size_t acc_size;
792 int ret;
793
794 BUG_ON(!bo_free);
795
796 acc_size =
797 vmw_dmabuf_acc_size(bdev->glob,
798 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
799
800 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
801 if (unlikely(ret != 0)) {
802 /* We must free the bo here, since on its own failure
803 * paths ttm_bo_init below does the same. */
804 bo_free(&vmw_bo->base);
805 return ret;
806 }
807
808 memset(vmw_bo, 0, sizeof(*vmw_bo));
809
810 INIT_LIST_HEAD(&vmw_bo->gmr_lru);
811 INIT_LIST_HEAD(&vmw_bo->validate_list);
812 vmw_bo->gmr_id = 0;
813 vmw_bo->gmr_bound = false;
814
815 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
816 ttm_bo_type_device, placement,
817 0, 0, interruptible,
818 NULL, acc_size, bo_free);
819 return ret;
820}
821
822static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
823{
824 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
825 struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
826 struct ttm_bo_global *glob = bo->glob;
827 struct vmw_private *dev_priv =
828 container_of(bo->bdev, struct vmw_private, bdev);
829
830 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
831 if (vmw_bo->gmr_bound) {
832 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
833 spin_lock(&glob->lru_lock);
834 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
835 spin_unlock(&glob->lru_lock);
836 }
837 kfree(vmw_user_bo);
838}
839
840static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
841{
842 struct vmw_user_dma_buffer *vmw_user_bo;
843 struct ttm_base_object *base = *p_base;
844 struct ttm_buffer_object *bo;
845
846 *p_base = NULL;
847
848 if (unlikely(base == NULL))
849 return;
850
851 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
852 bo = &vmw_user_bo->dma.base;
853 ttm_bo_unref(&bo);
854}
855
856int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
857 struct drm_file *file_priv)
858{
859 struct vmw_private *dev_priv = vmw_priv(dev);
860 union drm_vmw_alloc_dmabuf_arg *arg =
861 (union drm_vmw_alloc_dmabuf_arg *)data;
862 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
863 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
864 struct vmw_user_dma_buffer *vmw_user_bo;
865 struct ttm_buffer_object *tmp;
866 struct vmw_master *vmaster = vmw_master(file_priv->master);
867 int ret;
868
869 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
870 if (unlikely(vmw_user_bo == NULL))
871 return -ENOMEM;
872
873 ret = ttm_read_lock(&vmaster->lock, true);
874 if (unlikely(ret != 0)) {
875 kfree(vmw_user_bo);
876 return ret;
877 }
878
879 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
880 &vmw_vram_placement, true,
881 &vmw_user_dmabuf_destroy);
882 if (unlikely(ret != 0))
883 goto out_unlock; /* the bo was freed by vmw_dmabuf_init */
884
885 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
886 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
887 &vmw_user_bo->base,
888 false,
889 ttm_buffer_type,
890 &vmw_user_dmabuf_release, NULL);
891 if (unlikely(ret != 0)) {
892 ttm_bo_unref(&tmp);
893 goto out_unlock;
894 }
895 rep->handle = vmw_user_bo->base.hash.key;
896 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
897 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
898 rep->cur_gmr_offset = 0;
899 ttm_bo_unref(&tmp);
900
901out_unlock:
902 ttm_read_unlock(&vmaster->lock);
903 return ret;
904}
905
906int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
907 struct drm_file *file_priv)
908{
909 struct drm_vmw_unref_dmabuf_arg *arg =
910 (struct drm_vmw_unref_dmabuf_arg *)data;
911
912 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
913 arg->handle,
914 TTM_REF_USAGE);
915}
916
917uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
918 uint32_t cur_validate_node)
919{
920 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
921
922 if (likely(vmw_bo->on_validate_list))
923 return vmw_bo->cur_validate_node;
924
925 vmw_bo->cur_validate_node = cur_validate_node;
926 vmw_bo->on_validate_list = true;
927
928 return cur_validate_node;
929}
930
931void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
932{
933 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
934
935 vmw_bo->on_validate_list = false;
936}
937
938uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
939{
940 struct vmw_dma_buffer *vmw_bo;
941
942 if (bo->mem.mem_type == TTM_PL_VRAM)
943 return SVGA_GMR_FRAMEBUFFER;
944
945 vmw_bo = vmw_dma_buffer(bo);
946
947 return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
948}
949
950void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
951{
952 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
953 vmw_bo->gmr_bound = true;
954 vmw_bo->gmr_id = id;
955}
956
957int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
958 uint32_t handle, struct vmw_dma_buffer **out)
959{
960 struct vmw_user_dma_buffer *vmw_user_bo;
961 struct ttm_base_object *base;
962
963 base = ttm_base_object_lookup(tfile, handle);
964 if (unlikely(base == NULL)) {
965 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
966 (unsigned long)handle);
967 return -ESRCH;
968 }
969
970 if (unlikely(base->object_type != ttm_buffer_type)) {
971 ttm_base_object_unref(&base);
972 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
973 (unsigned long)handle);
974 return -EINVAL;
975 }
976
977 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
978 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
979 ttm_base_object_unref(&base);
980 *out = &vmw_user_bo->dma;
981
982 return 0;
983}
984
985/**
986 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
987 * when we're out of ids, causing buffers that would be GMR-backed to be
988 * placed in VRAM instead.
989 */
990
991int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
992{
993 struct ttm_bo_global *glob = dev_priv->bdev.glob;
994 int id;
995 int ret;
996
997 do {
998 if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
999 return -ENOMEM;
1000
1001 spin_lock(&glob->lru_lock);
1002 ret = ida_get_new(&dev_priv->gmr_ida, &id);
1003 spin_unlock(&glob->lru_lock);
1004 } while (ret == -EAGAIN);
1005
1006 if (unlikely(ret != 0))
1007 return ret;
1008
1009 if (unlikely(id >= dev_priv->max_gmr_ids)) {
1010 spin_lock(&glob->lru_lock);
1011 ida_remove(&dev_priv->gmr_ida, id);
1012 spin_unlock(&glob->lru_lock);
1013 return -EBUSY;
1014 }
1015
1016 *p_id = (uint32_t) id;
1017 return 0;
1018}
1019
1020/*
1021 * Stream management
1022 */
1023
1024static void vmw_stream_destroy(struct vmw_resource *res)
1025{
1026 struct vmw_private *dev_priv = res->dev_priv;
1027 struct vmw_stream *stream;
1028 int ret;
1029
1030 DRM_INFO("%s: unref\n", __func__);
1031 stream = container_of(res, struct vmw_stream, res);
1032
1033 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1034 WARN_ON(ret != 0);
1035}
1036
1037static int vmw_stream_init(struct vmw_private *dev_priv,
1038 struct vmw_stream *stream,
1039 void (*res_free) (struct vmw_resource *res))
1040{
1041 struct vmw_resource *res = &stream->res;
1042 int ret;
1043
1044 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1045 VMW_RES_STREAM, res_free);
1046
1047 if (unlikely(ret != 0)) {
1048 if (res_free == NULL)
1049 kfree(stream);
1050 else
1051 res_free(&stream->res);
1052 return ret;
1053 }
1054
1055 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1056 if (ret) {
1057 vmw_resource_unreference(&res);
1058 return ret;
1059 }
1060
1061 DRM_INFO("%s: claimed\n", __func__);
1062
1063 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1064 return 0;
1065}
1066
1067/**
1068 * User-space stream management:
1069 */
1070
1071static void vmw_user_stream_free(struct vmw_resource *res)
1072{
1073 struct vmw_user_stream *stream =
1074 container_of(res, struct vmw_user_stream, stream.res);
1075
1076 kfree(stream);
1077}
1078
1079/**
1080 * This function is called when user space has no more references on the
1081 * base object. It releases the base-object's reference on the resource object.
1082 */
1083
1084static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1085{
1086 struct ttm_base_object *base = *p_base;
1087 struct vmw_user_stream *stream =
1088 container_of(base, struct vmw_user_stream, base);
1089 struct vmw_resource *res = &stream->stream.res;
1090
1091 *p_base = NULL;
1092 vmw_resource_unreference(&res);
1093}
1094
1095int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1096 struct drm_file *file_priv)
1097{
1098 struct vmw_private *dev_priv = vmw_priv(dev);
1099 struct vmw_resource *res;
1100 struct vmw_user_stream *stream;
1101 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1102 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1103 int ret = 0;
1104
1105 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1106 if (unlikely(res == NULL))
1107 return -EINVAL;
1108
1109 if (res->res_free != &vmw_user_stream_free) {
1110 ret = -EINVAL;
1111 goto out;
1112 }
1113
1114 stream = container_of(res, struct vmw_user_stream, stream.res);
1115 if (stream->base.tfile != tfile) {
1116 ret = -EINVAL;
1117 goto out;
1118 }
1119
1120 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1121out:
1122 vmw_resource_unreference(&res);
1123 return ret;
1124}
1125
1126int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1127 struct drm_file *file_priv)
1128{
1129 struct vmw_private *dev_priv = vmw_priv(dev);
1130 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1131 struct vmw_resource *res;
1132 struct vmw_resource *tmp;
1133 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1134 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1135 int ret;
1136
1137 if (unlikely(stream == NULL))
1138 return -ENOMEM;
1139
1140 res = &stream->stream.res;
1141 stream->base.shareable = false;
1142 stream->base.tfile = NULL;
1143
1144 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1145 if (unlikely(ret != 0))
1146 return ret;
1147
1148 tmp = vmw_resource_reference(res);
1149 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1150 &vmw_user_stream_base_release, NULL);
1151
1152 if (unlikely(ret != 0)) {
1153 vmw_resource_unreference(&tmp);
1154 goto out_err;
1155 }
1156
1157 arg->stream_id = res->id;
1158out_err:
1159 vmw_resource_unreference(&res);
1160 return ret;
1161}
1162
1163int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1164 struct ttm_object_file *tfile,
1165 uint32_t *inout_id, struct vmw_resource **out)
1166{
1167 struct vmw_user_stream *stream;
1168 struct vmw_resource *res;
1169 int ret;
1170
1171 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1172 if (unlikely(res == NULL))
1173 return -EINVAL;
1174
1175 if (res->res_free != &vmw_user_stream_free) {
1176 ret = -EINVAL;
1177 goto err_ref;
1178 }
1179
1180 stream = container_of(res, struct vmw_user_stream, stream.res);
1181 if (stream->base.tfile != tfile) {
1182 ret = -EPERM;
1183 goto err_ref;
1184 }
1185
1186 *inout_id = stream->stream.stream_id;
1187 *out = res;
1188 return 0;
1189err_ref:
1190 vmw_resource_unreference(&res);
1191 return ret;
1192}
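
The file above leans on one pattern throughout: resources live in an idr guarded by dev_priv->resource_lock, lifetimes are kref-managed, and the avail flag keeps half-constructed objects invisible to lookups. A condensed restatement with invented names, for illustration only:

/* Taking the kref under the same rwlock that release runs under
 * guarantees a looked-up resource cannot be freed mid-use. */
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_res {
	struct kref kref;
	bool avail;		/* false until fully constructed */
};

static struct demo_res *demo_lookup(rwlock_t *lock, struct idr *idr, int id)
{
	struct demo_res *res;

	read_lock(lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(lock);

	return res;
}
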
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 000000000000..e3df4adfb4d8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
32{
33 struct drm_file *file_priv;
34 struct vmw_private *dev_priv;
35
36 if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
37 if (vmw_fifo_mmap(filp, vma) == 0)
38 return 0;
39 return drm_mmap(filp, vma);
40 }
41
42 file_priv = (struct drm_file *)filp->private_data;
43 dev_priv = vmw_priv(file_priv->minor->dev);
44 return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
45}
46
47static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
48{
49 DRM_INFO("global init.\n");
50 return ttm_mem_global_init(ref->object);
51}
52
53static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
54{
55 ttm_mem_global_release(ref->object);
56}
57
58int vmw_ttm_global_init(struct vmw_private *dev_priv)
59{
60 struct ttm_global_reference *global_ref;
61 int ret;
62
63 global_ref = &dev_priv->mem_global_ref;
64 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
65 global_ref->size = sizeof(struct ttm_mem_global);
66 global_ref->init = &vmw_ttm_mem_global_init;
67 global_ref->release = &vmw_ttm_mem_global_release;
68
69 ret = ttm_global_item_ref(global_ref);
70 if (unlikely(ret != 0)) {
71 DRM_ERROR("Failed setting up TTM memory accounting.\n");
72 return ret;
73 }
74
75 dev_priv->bo_global_ref.mem_glob =
76 dev_priv->mem_global_ref.object;
77 global_ref = &dev_priv->bo_global_ref.ref;
78 global_ref->global_type = TTM_GLOBAL_TTM_BO;
79 global_ref->size = sizeof(struct ttm_bo_global);
80 global_ref->init = &ttm_bo_global_init;
81 global_ref->release = &ttm_bo_global_release;
82 ret = ttm_global_item_ref(global_ref);
83
84 if (unlikely(ret != 0)) {
85 DRM_ERROR("Failed setting up TTM buffer objects.\n");
86 goto out_no_bo;
87 }
88
89 return 0;
90out_no_bo:
91 ttm_global_item_unref(&dev_priv->mem_global_ref);
92 return ret;
93}
94
95void vmw_ttm_global_release(struct vmw_private *dev_priv)
96{
97 ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
98 ttm_global_item_unref(&dev_priv->mem_global_ref);
99}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 54e174d28234..093f57af32d3 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -99,6 +99,8 @@ source "drivers/staging/p9auth/Kconfig"
99 99
100source "drivers/staging/line6/Kconfig" 100source "drivers/staging/line6/Kconfig"
101 101
102source "drivers/gpu/drm/vmwgfx/Kconfig"
103
102source "drivers/gpu/drm/radeon/Kconfig" 104source "drivers/gpu/drm/radeon/Kconfig"
103 105
104source "drivers/gpu/drm/nouveau/Kconfig" 106source "drivers/gpu/drm/nouveau/Kconfig"
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index cfa6af43c9ea..bd3a1c2fbdb4 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -7,5 +7,6 @@ unifdef-y += r128_drm.h
7unifdef-y += radeon_drm.h 7unifdef-y += radeon_drm.h
8unifdef-y += sis_drm.h 8unifdef-y += sis_drm.h
9unifdef-y += savage_drm.h 9unifdef-y += savage_drm.h
10unifdef-y += vmwgfx_drm.h
10unifdef-y += via_drm.h 11unifdef-y += via_drm.h
11unifdef-y += nouveau_drm.h 12unifdef-y += nouveau_drm.h
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index 703ca4db0a29..0d9db099978b 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -77,7 +77,11 @@ enum ttm_object_type {
77 ttm_buffer_type, 77 ttm_buffer_type,
78 ttm_lock_type, 78 ttm_lock_type,
79 ttm_driver_type0 = 256, 79 ttm_driver_type0 = 256,
80 ttm_driver_type1 80 ttm_driver_type1,
81 ttm_driver_type2,
82 ttm_driver_type3,
83 ttm_driver_type4,
84 ttm_driver_type5
81}; 85};
82 86
83struct ttm_object_file; 87struct ttm_object_file;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
new file mode 100644
index 000000000000..2be7e1249b6f
--- /dev/null
+++ b/include/drm/vmwgfx_drm.h
@@ -0,0 +1,574 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__
30
31#define DRM_VMW_MAX_SURFACE_FACES 6
32#define DRM_VMW_MAX_MIP_LEVELS 24
33
34#define DRM_VMW_EXT_NAME_LEN 128
35
36#define DRM_VMW_GET_PARAM 0
37#define DRM_VMW_ALLOC_DMABUF 1
38#define DRM_VMW_UNREF_DMABUF 2
39#define DRM_VMW_CURSOR_BYPASS 3
40/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
41#define DRM_VMW_CONTROL_STREAM 4
42#define DRM_VMW_CLAIM_STREAM 5
43#define DRM_VMW_UNREF_STREAM 6
44/* guarded by DRM_VMW_PARAM_3D == 1 */
45#define DRM_VMW_CREATE_CONTEXT 7
46#define DRM_VMW_UNREF_CONTEXT 8
47#define DRM_VMW_CREATE_SURFACE 9
48#define DRM_VMW_UNREF_SURFACE 10
49#define DRM_VMW_REF_SURFACE 11
50#define DRM_VMW_EXECBUF 12
51#define DRM_VMW_FIFO_DEBUG 13
52#define DRM_VMW_FENCE_WAIT 14
53
54
55/*************************************************************************/
56/**
57 * DRM_VMW_GET_PARAM - get device information.
58 *
59 * DRM_VMW_PARAM_FIFO_OFFSET:
60 * Offset to use to map the first page of the FIFO read-only.
61 * The fifo is mapped using the mmap() system call on the drm device.
62 *
63 * DRM_VMW_PARAM_NUM_STREAMS:
64 * Number of overlay streams. Zero means the overlay ioctls are not supported.
65 */
66
67#define DRM_VMW_PARAM_NUM_STREAMS 0
68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
69#define DRM_VMW_PARAM_3D 2
70#define DRM_VMW_PARAM_FIFO_OFFSET 3
71
72
73/**
74 * struct drm_vmw_getparam_arg
75 *
76 * @value: Returned value. //Out
77 * @param: Parameter to query. //In.
78 *
79 * Argument to the DRM_VMW_GET_PARAM Ioctl.
80 */
81
82struct drm_vmw_getparam_arg {
83 uint64_t value;
84 uint32_t param;
85 uint32_t pad64;
86};
87
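
As a usage illustration (not part of the patch), a user-space query through drm_vmw_getparam_arg might look like this, assuming libdrm's drmCommandWriteRead(); vmw_get_param() is an invented helper name:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
{
	struct drm_vmw_getparam_arg arg;
	int ret;

	arg.param = param;
	arg.pad64 = 0;
	arg.value = 0;
	ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
	if (ret == 0)
		*value = arg.value;
	return ret;
}

/* e.g.: uint64_t has_3d; vmw_get_param(fd, DRM_VMW_PARAM_3D, &has_3d); */
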
88/*************************************************************************/
89/**
90 * DRM_VMW_EXTENSION - Query device extensions.
91 */
92
93/**
94 * struct drm_vmw_extension_rep
95 *
96 * @exists: The queried extension exists.
97 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
98 * @driver_sarea_offset: Offset to any space in the DRI SAREA
99 * used by the extension.
100 * @major: Major version number of the extension.
101 * @minor: Minor version number of the extension.
102 * @pl: Patch level version number of the extension.
103 *
104 * Output argument to the DRM_VMW_EXTENSION Ioctl.
105 */
106
107struct drm_vmw_extension_rep {
108 int32_t exists;
109 uint32_t driver_ioctl_offset;
110 uint32_t driver_sarea_offset;
111 uint32_t major;
112 uint32_t minor;
113 uint32_t pl;
114 uint32_t pad64;
115};
116
117/**
118 * union drm_vmw_extension_arg
119 *
 * @extension - ASCII name of the extension to be queried. //In
 * @rep - Reply as defined above. //Out
122 *
123 * Argument to the DRM_VMW_EXTENSION Ioctl.
124 */
125
126union drm_vmw_extension_arg {
127 char extension[DRM_VMW_EXT_NAME_LEN];
128 struct drm_vmw_extension_rep rep;
129};
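
/*
 * Illustrative sketch, not part of the original header: the in/out
 * convention of union drm_vmw_extension_arg. The extension name is written
 * into the union and the reply overwrites it. No DRM_VMW_EXTENSION ioctl
 * index is defined in this header, so the actual request is left as a
 * placeholder comment below.
 */
#include <stdio.h>
#include <string.h>

static void vmw_query_extension_sketch(union drm_vmw_extension_arg *arg,
				       const char *name)
{
	memset(arg, 0, sizeof(*arg));
	strncpy(arg->extension, name, DRM_VMW_EXT_NAME_LEN - 1);
	/* ... pass arg to the (driver-defined) extension query ioctl ... */
	if (arg->rep.exists)
		printf("%s: ioctls at %u, version %u.%u.%u\n", name,
		       arg->rep.driver_ioctl_offset, arg->rep.major,
		       arg->rep.minor, arg->rep.pl);
}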
130
131/*************************************************************************/
132/**
133 * DRM_VMW_CREATE_CONTEXT - Create a host context.
134 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion. The context ID can be
 * used directly in the command stream and shows up as the same context ID
 * on the host.
137 */
138
139/**
140 * struct drm_vmw_context_arg
141 *
142 * @cid: Device unique context ID.
143 *
144 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
145 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
146 */
147
148struct drm_vmw_context_arg {
149 int32_t cid;
150 uint32_t pad64;
151};
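
/*
 * Illustrative sketch, not part of the original header: the same struct
 * carries the context id out of CREATE_CONTEXT and back into UNREF_CONTEXT.
 * Request macros assume the usual DRM_COMMAND_BASE encoding, as in the
 * DRM_VMW_GET_PARAM sketch above.
 */
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, struct drm_vmw_context_arg)

static void vmw_context_example(int fd)
{
	struct drm_vmw_context_arg arg = { 0 };

	if (ioctl(fd, DRM_IOCTL_VMW_CREATE_CONTEXT, &arg) == 0) {
		/* arg.cid may now be used in the command stream ... */
		ioctl(fd, DRM_IOCTL_VMW_UNREF_CONTEXT, &arg);
	}
}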
152
153/*************************************************************************/
154/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy command for the host.
 * Does not wait for host completion.
160 */
161
162/*************************************************************************/
163/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
165 *
166 * Allocates a device unique surface id, and queues a create surface command
167 * for the host. Does not wait for host completion. The surface ID can be
168 * used directly in the command stream and shows up as the same surface
169 * ID on the host.
170 */
171
172/**
 * struct drm_vmw_surface_create_req
174 *
175 * @flags: Surface flags as understood by the host.
176 * @format: Surface format as understood by the host.
177 * @mip_levels: Number of mip levels for each face.
178 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to a uint64_t for 32-64 bit compatibility.
181 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean indicating whether other clients (as identified by
 * file descriptors) may reference this surface.
184 *
185 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
186 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
187 */
188
189struct drm_vmw_surface_create_req {
190 uint32_t flags;
191 uint32_t format;
192 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
193 uint64_t size_addr;
194 int32_t shareable;
195 uint32_t pad64;
196};
197
198/**
 * struct drm_vmw_surface_arg
200 *
201 * @sid: Surface id of created surface or surface to destroy or reference.
202 *
203 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
204 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
205 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
206 */
207
208struct drm_vmw_surface_arg {
209 int32_t sid;
210 uint32_t pad64;
211};
212
213/**
 * struct drm_vmw_size
215 *
216 * @width - mip level width
217 * @height - mip level height
218 * @depth - mip level depth
219 *
220 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
222 */
223
224struct drm_vmw_size {
225 uint32_t width;
226 uint32_t height;
227 uint32_t depth;
228 uint32_t pad64;
229};
230
231/**
232 * union drm_vmw_surface_create_arg
233 *
234 * @rep: Output data as described above.
235 * @req: Input data as described above.
236 *
237 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
238 */
239
240union drm_vmw_surface_create_arg {
241 struct drm_vmw_surface_arg rep;
242 struct drm_vmw_surface_create_req req;
243};
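
/*
 * Illustrative sketch, not part of the original header: filling the input
 * half of union drm_vmw_surface_create_arg for a single-face surface with a
 * mip chain. The format value 0 is a placeholder, not a real host format
 * code, and the uint64_t cast follows the @size_addr convention above.
 */
#include <stdint.h>
#include <string.h>

static void vmw_fill_surface_req(union drm_vmw_surface_create_arg *arg,
				 struct drm_vmw_size *sizes, /* one per mip */
				 uint32_t mip_levels)
{
	memset(arg, 0, sizeof(*arg));
	arg->req.format = 0;			/* placeholder format code */
	arg->req.mip_levels[0] = mip_levels;	/* faces 1..5 left 0: unused */
	arg->req.size_addr = (uint64_t)(unsigned long)sizes;
	arg->req.shareable = 0;
	/* ... on ioctl success the new sid is returned in arg->rep.sid ... */
}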
244
245/*************************************************************************/
246/**
247 * DRM_VMW_REF_SURFACE - Reference a host surface.
248 *
 * Puts a reference on a host surface with a given sid, as previously
250 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
251 * A reference will make sure the surface isn't destroyed while we hold
252 * it and will allow the calling client to use the surface ID in the command
253 * stream.
254 *
255 * On successful return, the Ioctl returns the surface information given
256 * in the DRM_VMW_CREATE_SURFACE ioctl.
257 */
258
259/**
260 * union drm_vmw_surface_reference_arg
261 *
262 * @rep: Output data as described above.
263 * @req: Input data as described above.
264 *
265 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
266 */
267
268union drm_vmw_surface_reference_arg {
269 struct drm_vmw_surface_create_req rep;
270 struct drm_vmw_surface_arg req;
271};
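
/*
 * Illustrative sketch, not part of the original header: note the asymmetry
 * of union drm_vmw_surface_reference_arg - the sid goes in through the small
 * req member, and the full creation parameters come back in rep.
 */
static void vmw_ref_surface_sketch(union drm_vmw_surface_reference_arg *arg,
				   int32_t sid)
{
	arg->req.sid = sid;
	arg->req.pad64 = 0;
	/* ... after a successful DRM_VMW_REF_SURFACE ioctl, arg->rep.format,
	 * arg->rep.mip_levels[] etc. describe the referenced surface ... */
}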
272
273/*************************************************************************/
274/**
275 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
276 *
 * Clears a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed on
 * creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
282 */
283
284/*************************************************************************/
285/**
286 * DRM_VMW_EXECBUF
287 *
288 * Submit a command buffer for execution on the host, and return a
289 * fence sequence that when signaled, indicates that the command buffer has
290 * executed.
291 */
292
293/**
294 * struct drm_vmw_execbuf_arg
295 *
 * @commands: User-space address of a command buffer cast to a uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to a
 * uint64_t.
300 *
301 * Argument to the DRM_VMW_EXECBUF Ioctl.
302 */
303
304struct drm_vmw_execbuf_arg {
305 uint64_t commands;
306 uint32_t command_size;
307 uint32_t pad64;
308 uint64_t fence_rep;
309};
310
311/**
312 * struct drm_vmw_fence_rep
313 *
314 * @fence_seq: Fence sequence associated with a command submission.
 * @error: The caller should set this member to -EFAULT before submission,
 * so that a failed write-back of this struct can be detected. The following
 * actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT: Fence submission failed. The host is
 * synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
323 *
324 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
325 */
326
327struct drm_vmw_fence_rep {
328 uint64_t fence_seq;
329 int32_t error;
330 uint32_t pad64;
331};
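
/*
 * Illustrative sketch, not part of the original header: submitting a command
 * buffer and triaging drm_vmw_fence_rep.error exactly as described above.
 * The request macro assumes the usual DRM_COMMAND_BASE encoding.
 */
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, struct drm_vmw_execbuf_arg)

static int vmw_execbuf(int fd, void *cmds, uint32_t size, uint64_t *seq)
{
	struct drm_vmw_fence_rep fence = { .error = -EFAULT }; /* sentinel */
	struct drm_vmw_execbuf_arg arg = {
		.commands = (uint64_t)(unsigned long)cmds,
		.command_size = size,
		.fence_rep = (uint64_t)(unsigned long)&fence,
	};

	if (ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg))
		return -errno;
	if (fence.error == -EFAULT)
		return 1;	/* write-back failed: read the FIFO fence register */
	*seq = fence.fence_seq;	/* host may or may not be synchronized */
	return 0;
}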
332
333/*************************************************************************/
334/**
335 * DRM_VMW_ALLOC_DMABUF
336 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
344 *
345 * The DMA buffer may stay user-space mapped in the guest at all times,
346 * and is thus suitable for sub-allocation.
347 *
348 * DMA buffers are mapped using the mmap() syscall on the drm device.
349 */
350
351/**
352 * struct drm_vmw_alloc_dmabuf_req
353 *
354 * @size: Required minimum size of the buffer.
355 *
356 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
357 */
358
359struct drm_vmw_alloc_dmabuf_req {
360 uint32_t size;
361 uint32_t pad64;
362};
363
364/**
365 * struct drm_vmw_dmabuf_rep
366 *
367 * @map_handle: Offset to use in the mmap() call used to map the buffer.
368 * @handle: Handle unique to this buffer. Used for unreferencing.
369 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
371 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
372 * referenced. See note above.
373 *
374 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
375 */
376
377struct drm_vmw_dmabuf_rep {
378 uint64_t map_handle;
379 uint32_t handle;
380 uint32_t cur_gmr_id;
381 uint32_t cur_gmr_offset;
382 uint32_t pad64;
383};
384
385/**
386 * union drm_vmw_dmabuf_arg
387 *
388 * @req: Input data as described above.
389 * @rep: Output data as described above.
390 *
391 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
392 */
393
394union drm_vmw_alloc_dmabuf_arg {
395 struct drm_vmw_alloc_dmabuf_req req;
396 struct drm_vmw_dmabuf_rep rep;
397};
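
/*
 * Illustrative sketch, not part of the original header: allocating a DMA
 * buffer and mapping it using rep.map_handle as the mmap() offset, as
 * described above. The request macro assumes the usual DRM_COMMAND_BASE
 * encoding.
 */
#include <sys/mman.h>

#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, union drm_vmw_alloc_dmabuf_arg)

static void *vmw_dmabuf_alloc_map(int fd, uint32_t size, uint32_t *handle)
{
	union drm_vmw_alloc_dmabuf_arg arg = { .req = { .size = size } };
	void *ptr;

	if (ioctl(fd, DRM_IOCTL_VMW_ALLOC_DMABUF, &arg))
		return NULL;
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)arg.rep.map_handle);
	*handle = arg.rep.handle;	/* keep for DRM_VMW_UNREF_DMABUF */
	return ptr == MAP_FAILED ? NULL : ptr;
}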
398
399/*************************************************************************/
400/**
401 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
402 *
403 */
404
405/**
406 * struct drm_vmw_unref_dmabuf_arg
407 *
408 * @handle: Handle indicating what buffer to free. Obtained from the
409 * DRM_VMW_ALLOC_DMABUF Ioctl.
410 *
411 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
412 */
413
414struct drm_vmw_unref_dmabuf_arg {
415 uint32_t handle;
416 uint32_t pad64;
417};
418
419/*************************************************************************/
420/**
421 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
422 *
423 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
424 */
425
426/**
427 * struct drm_vmw_fifo_debug_arg
428 *
 * @debug_buffer: User-space address of the debug buffer cast to a uint64_t //In
430 * @debug_buffer_size: Size in bytes of debug buffer //In
431 * @used_size: Number of bytes copied to the buffer // Out
432 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
433 *
434 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
435 */
436
437struct drm_vmw_fifo_debug_arg {
438 uint64_t debug_buffer;
439 uint32_t debug_buffer_size;
440 uint32_t used_size;
441 int32_t did_not_fit;
442 uint32_t pad64;
443};
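
/*
 * Illustrative sketch, not part of the original header: pulling the last
 * FIFO submission into a user buffer. The request macro assumes the usual
 * DRM_COMMAND_BASE encoding.
 */
#define DRM_IOCTL_VMW_FIFO_DEBUG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, struct drm_vmw_fifo_debug_arg)

static int vmw_fifo_dump(int fd, void *buf, uint32_t buf_size, uint32_t *used)
{
	struct drm_vmw_fifo_debug_arg arg = {
		.debug_buffer = (uint64_t)(unsigned long)buf,
		.debug_buffer_size = buf_size,
	};

	if (ioctl(fd, DRM_IOCTL_VMW_FIFO_DEBUG, &arg))
		return -errno;
	*used = arg.used_size;
	return arg.did_not_fit;	/* nonzero means the dump was truncated */
}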
444
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT - Wait for a fence sequence to signal.
 */

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @sequence: Fence sequence to wait for.
 * @kernel_cookie: Opaque cookie maintained by the kernel across restarted
 * waits.
 * @cookie_valid: Set when @kernel_cookie contains a valid value.
 *
 * Argument to the DRM_VMW_FENCE_WAIT Ioctl.
 */

struct drm_vmw_fence_wait_arg {
446 uint64_t sequence;
447 uint64_t kernel_cookie;
448 int32_t cookie_valid;
449 int32_t pad64;
450};
451
452/*************************************************************************/
453/**
454 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
455 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead, they read back only on each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
462 */
463
464/**
465 * struct drm_vmw_rect
466 *
467 * Defines a rectangle. Used in the overlay ioctl to define
468 * source and destination rectangle.
469 */
470
471struct drm_vmw_rect {
472 int32_t x;
473 int32_t y;
474 uint32_t w;
475 uint32_t h;
476};
477
478/**
479 * struct drm_vmw_control_stream_arg
480 *
 * @stream_id: Stream to control.
 * @enabled: If false, all following arguments are ignored.
483 * @handle: Handle to buffer for getting data from.
484 * @format: Format of the overlay as understood by the host.
485 * @width: Width of the overlay.
486 * @height: Height of the overlay.
487 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches; the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect; must be within the area defined above.
 * @dst: Destination rect; x and y may be negative.
492 *
493 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
494 */
495
496struct drm_vmw_control_stream_arg {
497 uint32_t stream_id;
498 uint32_t enabled;
499
500 uint32_t flags;
501 uint32_t color_key;
502
503 uint32_t handle;
504 uint32_t offset;
505 int32_t format;
506 uint32_t size;
507 uint32_t width;
508 uint32_t height;
509 uint32_t pitch[3];
510
511 uint32_t pad64;
512 struct drm_vmw_rect src;
513 struct drm_vmw_rect dst;
514};
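
/*
 * Illustrative sketch, not part of the original header: enabling an overlay
 * stream backed by a previously allocated DMA buffer. The format is left 0
 * as a placeholder for a host-understood code; a single pitch is set, as
 * for packed formats per the comment above.
 */
#include <string.h>

static void vmw_stream_enable_sketch(struct drm_vmw_control_stream_arg *arg,
				     uint32_t stream_id, uint32_t handle,
				     uint32_t w, uint32_t h, uint32_t pitch)
{
	memset(arg, 0, sizeof(*arg));
	arg->stream_id = stream_id;		/* from DRM_VMW_CLAIM_STREAM */
	arg->enabled = 1;
	arg->handle = handle;			/* from DRM_VMW_ALLOC_DMABUF */
	arg->width = w;
	arg->height = h;
	arg->size = pitch * h;
	arg->pitch[0] = pitch;
	arg->src = (struct drm_vmw_rect){ .w = w, .h = h };
	arg->dst = (struct drm_vmw_rect){ .w = w, .h = h };
	/* ... then issue the DRM_VMW_CONTROL_STREAM ioctl with arg ... */
}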
515
516/*************************************************************************/
517/**
518 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
519 *
520 */
521
522#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
523#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
524
525/**
526 * struct drm_vmw_cursor_bypass_arg
527 *
528 * @flags: Flags.
 * @crtc_id: Crtc id; only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
530 * @xpos: X position of cursor.
531 * @ypos: Y position of cursor.
532 * @xhot: X hotspot.
533 * @yhot: Y hotspot.
534 *
535 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
536 */
537
538struct drm_vmw_cursor_bypass_arg {
539 uint32_t flags;
540 uint32_t crtc_id;
541 int32_t xpos;
542 int32_t ypos;
543 int32_t xhot;
544 int32_t yhot;
545};
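
/*
 * Illustrative sketch, not part of the original header: passing cursor
 * position and hotspot information for all crtcs at once via the
 * DRM_VMW_CURSOR_BYPASS_ALL flag.
 */
static void vmw_cursor_bypass_all(struct drm_vmw_cursor_bypass_arg *arg,
				  int32_t xhot, int32_t yhot)
{
	memset(arg, 0, sizeof(*arg));
	arg->flags = DRM_VMW_CURSOR_BYPASS_ALL;	/* crtc_id is then ignored */
	arg->xhot = xhot;
	arg->yhot = yhot;
	/* ... then issue the DRM_VMW_CURSOR_BYPASS ioctl with arg ... */
}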
546
547/*************************************************************************/
548/**
549 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
550 */
551
552/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
559 */
560
561struct drm_vmw_stream_arg {
562 uint32_t stream_id;
563 uint32_t pad64;
564};
565
566/*************************************************************************/
567/**
568 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
569 *
570 * Return a single stream that was claimed by this process. Also makes
571 * sure that the stream has been stopped.
572 */
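
/*
 * Illustrative sketch, not part of the original header: claiming a stream
 * and returning its id to the caller. Request macros assume the usual
 * DRM_COMMAND_BASE encoding; availability can be checked beforehand via
 * DRM_VMW_PARAM_NUM_FREE_STREAMS.
 */
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, struct drm_vmw_stream_arg)

static int vmw_stream_claim(int fd, uint32_t *stream_id)
{
	struct drm_vmw_stream_arg arg = { 0 };

	if (ioctl(fd, DRM_IOCTL_VMW_CLAIM_STREAM, &arg))
		return -errno;
	*stream_id = arg.stream_id;	/* hand back to UNREF_STREAM when done */
	return 0;
}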
573
574#endif