author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/vmwgfx
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master [archived-private-master]
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig           |   13
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile          |    9
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h      | 1793
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h     |   89
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h    |  201
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h        | 1346
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_types.h      |   45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |  252
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  783
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  521
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  716
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c       |  737
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c     |  538
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c      |  213
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c    |   87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c      |  286
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c      |  880
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h      |  102
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c      |  516
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  |  625
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h      |   57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1187
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c |   99
23 files changed, 11095 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 000000000000..30ad13344f7b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI && FB
4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT
8 select DRM_TTM
9 help
10 KMS-enabled DRM driver for SVGA2 virtual hardware.
11
12 If unsure, say N. The compiled module will be
13 called vmwgfx.ko.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 000000000000..1a3cb6816d1c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
1
2ccflags-y := -Iinclude/drm
3
4vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o
8
9obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 000000000000..77cb45331000
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3D hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#include "svga_reg.h"
36
37
38/*
39 * 3D Hardware Version
40 *
41 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
42 * register. It is set by the host and read by the guest. This lets
43 * us make new guest drivers which are backwards-compatible with old
44 * SVGA hardware revisions. It does not let us support old guest
45 * drivers. Good enough for now.
46 *
47 */
48
49#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
50#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
51#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
52
53typedef enum {
54 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
55 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
56 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
57 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
58 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
59 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
60 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
61} SVGA3dHardwareVersion;
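
/*
 * Editor's sketch (illustrative, not part of the original header): one
 * plausible way for a guest driver to gate 3D support on the value it
 * reads from SVGA_FIFO_3D_HWVERSION, using the macros above. The exact
 * compatibility policy is the driver's choice; this helper only
 * demonstrates the macro arithmetic.
 */
static inline int
svga3d_hwversion_at_least(uint32 hostVersion, uint32 required)
{
   /* E.g. svga3d_hwversion_at_least(hw, SVGA3D_HWVERSION_WS6_B1) */
   if (SVGA3D_MAJOR_HWVERSION(hostVersion) != SVGA3D_MAJOR_HWVERSION(required))
      return SVGA3D_MAJOR_HWVERSION(hostVersion) > SVGA3D_MAJOR_HWVERSION(required);
   return SVGA3D_MINOR_HWVERSION(hostVersion) >= SVGA3D_MINOR_HWVERSION(required);
}
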
62
63/*
64 * Generic Types
65 */
66
67typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
68#define SVGA3D_NUM_CLIPPLANES 6
69#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
70
71
72/*
73 * Surface formats.
74 *
75 * If you modify this list, be sure to keep GLUtil.c in sync. It
76 * includes the internal format definition of each surface in
77 * GLUtil_ConvertSurfaceFormat, and it contains a table of
78 * human-readable names in GLUtil_GetFormatName.
79 */
80
81typedef enum SVGA3dSurfaceFormat {
82 SVGA3D_FORMAT_INVALID = 0,
83
84 SVGA3D_X8R8G8B8 = 1,
85 SVGA3D_A8R8G8B8 = 2,
86
87 SVGA3D_R5G6B5 = 3,
88 SVGA3D_X1R5G5B5 = 4,
89 SVGA3D_A1R5G5B5 = 5,
90 SVGA3D_A4R4G4B4 = 6,
91
92 SVGA3D_Z_D32 = 7,
93 SVGA3D_Z_D16 = 8,
94 SVGA3D_Z_D24S8 = 9,
95 SVGA3D_Z_D15S1 = 10,
96
97 SVGA3D_LUMINANCE8 = 11,
98 SVGA3D_LUMINANCE4_ALPHA4 = 12,
99 SVGA3D_LUMINANCE16 = 13,
100 SVGA3D_LUMINANCE8_ALPHA8 = 14,
101
102 SVGA3D_DXT1 = 15,
103 SVGA3D_DXT2 = 16,
104 SVGA3D_DXT3 = 17,
105 SVGA3D_DXT4 = 18,
106 SVGA3D_DXT5 = 19,
107
108 SVGA3D_BUMPU8V8 = 20,
109 SVGA3D_BUMPL6V5U5 = 21,
110 SVGA3D_BUMPX8L8V8U8 = 22,
111 SVGA3D_BUMPL8V8U8 = 23,
112
113 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
114 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
115
116 SVGA3D_A2R10G10B10 = 26,
117
118 /* signed formats */
119 SVGA3D_V8U8 = 27,
120 SVGA3D_Q8W8V8U8 = 28,
121 SVGA3D_CxV8U8 = 29,
122
123 /* mixed formats */
124 SVGA3D_X8L8V8U8 = 30,
125 SVGA3D_A2W10V10U10 = 31,
126
127 SVGA3D_ALPHA8 = 32,
128
129 /* Single- and dual-component floating point formats */
130 SVGA3D_R_S10E5 = 33,
131 SVGA3D_R_S23E8 = 34,
132 SVGA3D_RG_S10E5 = 35,
133 SVGA3D_RG_S23E8 = 36,
134
135 /*
136 * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
137 * the most efficient format to use when creating new surfaces
138 * expressly for index or vertex data.
139 */
140 SVGA3D_BUFFER = 37,
141
142 SVGA3D_Z_D24X8 = 38,
143
144 SVGA3D_V16U16 = 39,
145
146 SVGA3D_G16R16 = 40,
147 SVGA3D_A16B16G16R16 = 41,
148
149 /* Packed Video formats */
150 SVGA3D_UYVY = 42,
151 SVGA3D_YUY2 = 43,
152
153 SVGA3D_FORMAT_MAX
154} SVGA3dSurfaceFormat;
155
156typedef uint32 SVGA3dColor; /* a, r, g, b */
157
158/*
159 * These match the D3DFORMAT_OP definitions used by Direct3D. We need
160 * them so that we can query the host for what the supported surface
161 * operations are (when we're using the D3D backend, in particular),
162 * and so we can send those operations to the guest.
163 */
164typedef enum {
165 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
166 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
167 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
168 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
169 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
170 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
171 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
172
173/*
174 * This format can be used as a render target if the current display mode
175 * has the same depth once the alpha channel is ignored; e.g. if the device
176 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
177 * format op list entry for A8R8G8B8 should have this cap.
178 */
179 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
180
181/*
182 * This format contains DirectDraw support (including Flip). This flag
183 * should not be set on alpha formats.
184 */
185 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
186
187/*
188 * The rasterizer can provide some level of Direct3D support for this format,
189 * implying that the driver can create a context in this mode (for some
190 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
191 * flag must also be set.
192 */
193 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
194
195/*
196 * This is set for a private format when the driver has put the bpp in
197 * the structure.
198 */
199 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
200
201/*
202 * Indicates that this format can be converted to any RGB format for which
203 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
204 */
205 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
206
207/*
208 * Indicates that this format can be used to create offscreen plain surfaces.
209 */
210 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
211
212/*
213 * Indicates that this format can be read as an sRGB texture (meaning that the
214 * sampler will linearize the looked-up data).
215 */
216 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
217
218/*
219 * Indicates that this format can be used in the bumpmap instructions
220 */
221 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
222
223/*
224 * Indicates that this format can be sampled by the displacement map sampler
225 */
226 SVGA3DFORMAT_OP_DMAP = 0x00020000,
227
228/*
229 * Indicates that this format cannot be used with texture filtering
230 */
231 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
232
233/*
234 * Indicates that format conversions are supported to this RGB format if
235 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
236 */
237 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
238
239/*
240 * Indicates that this format can be written as an sRGB target (meaning that the
241 * pixel pipe will de-linearize data on output to the format).
242 */
243 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
244
245/*
246 * Indicates that this format cannot be used with alpha blending
247 */
248 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
249
250/*
251 * Indicates that the device can auto-generate sublevels for resources
252 * of this format
253 */
254 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
255
256/*
257 * Indicates that this format can be used by vertex texture sampler
258 */
259 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
260
261/*
262 * Indicates that this format supports neither texture coordinate wrap
263 * modes nor mipmapping.
264 */
265 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
266} SVGA3dFormatOp;
267
268/*
269 * This structure is a conversion of SVGA3DFORMAT_OP_*.
270 * Entries must be located at the same bit positions as their flags.
271 */
272typedef union {
273 uint32 value;
274 struct {
275 uint32 texture : 1;
276 uint32 volumeTexture : 1;
277 uint32 cubeTexture : 1;
278 uint32 offscreenRenderTarget : 1;
279 uint32 sameFormatRenderTarget : 1;
280 uint32 unknown1 : 1;
281 uint32 zStencil : 1;
282 uint32 zStencilArbitraryDepth : 1;
283 uint32 sameFormatUpToAlpha : 1;
284 uint32 unknown2 : 1;
285 uint32 displayMode : 1;
286 uint32 acceleration3d : 1;
287 uint32 pixelSize : 1;
288 uint32 convertToARGB : 1;
289 uint32 offscreenPlain : 1;
290 uint32 sRGBRead : 1;
291 uint32 bumpMap : 1;
292 uint32 dmap : 1;
293 uint32 noFilter : 1;
294 uint32 memberOfGroupARGB : 1;
295 uint32 sRGBWrite : 1;
296 uint32 noAlphaBlend : 1;
297 uint32 autoGenMipMap : 1;
298 uint32 vertexTexture : 1;
299 uint32 noTexCoordWrapNorMip : 1;
300 };
301} SVGA3dSurfaceFormatCaps;
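
/*
 * Editor's sketch (illustrative): because the bitfield above mirrors
 * the SVGA3DFORMAT_OP_* flags bit for bit (assuming bitfields are
 * allocated from the least significant bit, which this layout relies
 * on), a capability word can be tested either through the named
 * members or through the raw flag masks. 'caps' is a hypothetical
 * value obtained from a format capability query.
 */
static inline int
svga3d_format_supports_texture(SVGA3dSurfaceFormatCaps caps)
{
   /* Same bit as caps.texture */
   return (caps.value & SVGA3DFORMAT_OP_TEXTURE) != 0;
}
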
302
303/*
304 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
305 * must fit in a uint32.
306 */
307
308typedef enum {
309 SVGA3D_RS_INVALID = 0,
310 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
311 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
312 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
313 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
314 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
315 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
316 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
317 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
318 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
319 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
320 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
321 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
322 SVGA3D_RS_STENCILREF = 13, /* uint32 */
323 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
324 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
325 SVGA3D_RS_FOGSTART = 16, /* float */
326 SVGA3D_RS_FOGEND = 17, /* float */
327 SVGA3D_RS_FOGDENSITY = 18, /* float */
328 SVGA3D_RS_POINTSIZE = 19, /* float */
329 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
330 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
331 SVGA3D_RS_POINTSCALE_A = 22, /* float */
332 SVGA3D_RS_POINTSCALE_B = 23, /* float */
333 SVGA3D_RS_POINTSCALE_C = 24, /* float */
334 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
335 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
336 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
337 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
338 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
339 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
340 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
341 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
342 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
343 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
344 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
345 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
346 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
347 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
348 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
349 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
350 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
351 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
352 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
353 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
354 SVGA3D_RS_ZBIAS = 45, /* float */
355 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
356 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
357 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
358 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
359 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
360 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
361 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
362 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
363 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
364 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
365 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
366 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
367 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
368 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
369 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
370 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
371 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
372 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
373 SVGA3D_RS_DEPTHBIAS = 64, /* float */
374
375
376 /*
377 * Output Gamma Level
378 *
379 * Output gamma affects the gamma curve of colors that are output from the
380 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
381 * value is <= 0.0, gamma correction is ignored and linear color space is
382 * used.
383 */
384
385 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
386 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
387 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
388 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
389 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
390 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
391 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
392 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
393 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
394 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
395 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
396 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
397 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
398 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
399 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
400 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
401 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
402 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
403 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
404 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
405 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
406 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
407 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
408 SVGA3D_RS_TWEENFACTOR = 88, /* float */
409 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
410 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
411 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
412 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
413 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
414 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
415 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
416 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
417 SVGA3D_RS_MAX
418} SVGA3dRenderStateName;
419
420typedef enum {
421 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
422 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
423 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
424} SVGA3dVertexMaterial;
425
426typedef enum {
427 SVGA3D_FILLMODE_INVALID = 0,
428 SVGA3D_FILLMODE_POINT = 1,
429 SVGA3D_FILLMODE_LINE = 2,
430 SVGA3D_FILLMODE_FILL = 3,
431 SVGA3D_FILLMODE_MAX
432} SVGA3dFillModeType;
433
434
435typedef
436union {
437 struct {
438 uint16 mode; /* SVGA3dFillModeType */
439 uint16 face; /* SVGA3dFace */
440 };
441 uint32 uintValue;
442} SVGA3dFillMode;
443
444typedef enum {
445 SVGA3D_SHADEMODE_INVALID = 0,
446 SVGA3D_SHADEMODE_FLAT = 1,
447 SVGA3D_SHADEMODE_SMOOTH = 2,
448 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
449 SVGA3D_SHADEMODE_MAX
450} SVGA3dShadeMode;
451
452typedef
453union {
454 struct {
455 uint16 repeat;
456 uint16 pattern;
457 };
458 uint32 uintValue;
459} SVGA3dLinePattern;
460
461typedef enum {
462 SVGA3D_BLENDOP_INVALID = 0,
463 SVGA3D_BLENDOP_ZERO = 1,
464 SVGA3D_BLENDOP_ONE = 2,
465 SVGA3D_BLENDOP_SRCCOLOR = 3,
466 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
467 SVGA3D_BLENDOP_SRCALPHA = 5,
468 SVGA3D_BLENDOP_INVSRCALPHA = 6,
469 SVGA3D_BLENDOP_DESTALPHA = 7,
470 SVGA3D_BLENDOP_INVDESTALPHA = 8,
471 SVGA3D_BLENDOP_DESTCOLOR = 9,
472 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
473 SVGA3D_BLENDOP_SRCALPHASAT = 11,
474 SVGA3D_BLENDOP_BLENDFACTOR = 12,
475 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
476 SVGA3D_BLENDOP_MAX
477} SVGA3dBlendOp;
478
479typedef enum {
480 SVGA3D_BLENDEQ_INVALID = 0,
481 SVGA3D_BLENDEQ_ADD = 1,
482 SVGA3D_BLENDEQ_SUBTRACT = 2,
483 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
484 SVGA3D_BLENDEQ_MINIMUM = 4,
485 SVGA3D_BLENDEQ_MAXIMUM = 5,
486 SVGA3D_BLENDEQ_MAX
487} SVGA3dBlendEquation;
488
489typedef enum {
490 SVGA3D_FRONTWINDING_INVALID = 0,
491 SVGA3D_FRONTWINDING_CW = 1,
492 SVGA3D_FRONTWINDING_CCW = 2,
493 SVGA3D_FRONTWINDING_MAX
494} SVGA3dFrontWinding;
495
496typedef enum {
497 SVGA3D_FACE_INVALID = 0,
498 SVGA3D_FACE_NONE = 1,
499 SVGA3D_FACE_FRONT = 2,
500 SVGA3D_FACE_BACK = 3,
501 SVGA3D_FACE_FRONT_BACK = 4,
502 SVGA3D_FACE_MAX
503} SVGA3dFace;
504
505/*
506 * The order and the values should not be changed
507 */
508
509typedef enum {
510 SVGA3D_CMP_INVALID = 0,
511 SVGA3D_CMP_NEVER = 1,
512 SVGA3D_CMP_LESS = 2,
513 SVGA3D_CMP_EQUAL = 3,
514 SVGA3D_CMP_LESSEQUAL = 4,
515 SVGA3D_CMP_GREATER = 5,
516 SVGA3D_CMP_NOTEQUAL = 6,
517 SVGA3D_CMP_GREATEREQUAL = 7,
518 SVGA3D_CMP_ALWAYS = 8,
519 SVGA3D_CMP_MAX
520} SVGA3dCmpFunc;
521
522/*
523 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
524 * the fog factor to be specified in the alpha component of the specular
525 * (a.k.a. secondary) vertex color.
526 */
527typedef enum {
528 SVGA3D_FOGFUNC_INVALID = 0,
529 SVGA3D_FOGFUNC_EXP = 1,
530 SVGA3D_FOGFUNC_EXP2 = 2,
531 SVGA3D_FOGFUNC_LINEAR = 3,
532 SVGA3D_FOGFUNC_PER_VERTEX = 4
533} SVGA3dFogFunction;
534
535/*
536 * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
537 * or per-pixel basis.
538 */
539typedef enum {
540 SVGA3D_FOGTYPE_INVALID = 0,
541 SVGA3D_FOGTYPE_VERTEX = 1,
542 SVGA3D_FOGTYPE_PIXEL = 2,
543 SVGA3D_FOGTYPE_MAX = 3
544} SVGA3dFogType;
545
546/*
547 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
548 * computed using the eye Z value of each pixel (or vertex), whereas range-
549 * based fog is computed using the actual distance (range) to the eye.
550 */
551typedef enum {
552 SVGA3D_FOGBASE_INVALID = 0,
553 SVGA3D_FOGBASE_DEPTHBASED = 1,
554 SVGA3D_FOGBASE_RANGEBASED = 2,
555 SVGA3D_FOGBASE_MAX = 3
556} SVGA3dFogBase;
557
558typedef enum {
559 SVGA3D_STENCILOP_INVALID = 0,
560 SVGA3D_STENCILOP_KEEP = 1,
561 SVGA3D_STENCILOP_ZERO = 2,
562 SVGA3D_STENCILOP_REPLACE = 3,
563 SVGA3D_STENCILOP_INCRSAT = 4,
564 SVGA3D_STENCILOP_DECRSAT = 5,
565 SVGA3D_STENCILOP_INVERT = 6,
566 SVGA3D_STENCILOP_INCR = 7,
567 SVGA3D_STENCILOP_DECR = 8,
568 SVGA3D_STENCILOP_MAX
569} SVGA3dStencilOp;
570
571typedef enum {
572 SVGA3D_CLIPPLANE_0 = (1 << 0),
573 SVGA3D_CLIPPLANE_1 = (1 << 1),
574 SVGA3D_CLIPPLANE_2 = (1 << 2),
575 SVGA3D_CLIPPLANE_3 = (1 << 3),
576 SVGA3D_CLIPPLANE_4 = (1 << 4),
577 SVGA3D_CLIPPLANE_5 = (1 << 5),
578} SVGA3dClipPlanes;
579
580typedef enum {
581 SVGA3D_CLEAR_COLOR = 0x1,
582 SVGA3D_CLEAR_DEPTH = 0x2,
583 SVGA3D_CLEAR_STENCIL = 0x4
584} SVGA3dClearFlag;
585
586typedef enum {
587 SVGA3D_RT_DEPTH = 0,
588 SVGA3D_RT_STENCIL = 1,
589 SVGA3D_RT_COLOR0 = 2,
590 SVGA3D_RT_COLOR1 = 3,
591 SVGA3D_RT_COLOR2 = 4,
592 SVGA3D_RT_COLOR3 = 5,
593 SVGA3D_RT_COLOR4 = 6,
594 SVGA3D_RT_COLOR5 = 7,
595 SVGA3D_RT_COLOR6 = 8,
596 SVGA3D_RT_COLOR7 = 9,
597 SVGA3D_RT_MAX,
598 SVGA3D_RT_INVALID = ((uint32)-1),
599} SVGA3dRenderTargetType;
600
601#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
602
603typedef
604union {
605 struct {
606 uint32 red : 1;
607 uint32 green : 1;
608 uint32 blue : 1;
609 uint32 alpha : 1;
610 };
611 uint32 uintValue;
612} SVGA3dColorMask;
613
614typedef enum {
615 SVGA3D_VBLEND_DISABLE = 0,
616 SVGA3D_VBLEND_1WEIGHT = 1,
617 SVGA3D_VBLEND_2WEIGHT = 2,
618 SVGA3D_VBLEND_3WEIGHT = 3,
619} SVGA3dVertexBlendFlags;
620
621typedef enum {
622 SVGA3D_WRAPCOORD_0 = 1 << 0,
623 SVGA3D_WRAPCOORD_1 = 1 << 1,
624 SVGA3D_WRAPCOORD_2 = 1 << 2,
625 SVGA3D_WRAPCOORD_3 = 1 << 3,
626 SVGA3D_WRAPCOORD_ALL = 0xF,
627} SVGA3dWrapFlags;
628
629/*
630 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
631 * must fit in a uint32.
632 */
633
634typedef enum {
635 SVGA3D_TS_INVALID = 0,
636 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
637 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
638 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
639 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
640 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
641 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
642 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
643 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
644 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
645 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
646 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
647 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
648 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
649 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
650 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
651 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
652 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
653 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
654 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
655 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
656 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
657 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
658 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
659 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
660
661
662 /*
663 * Sampler Gamma Level
664 *
665 * Sampler gamma affects the color of samples taken from the sampler. A
666 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
667 * gamma value is ignored and a linear space is used.
668 */
669
670 SVGA3D_TS_GAMMA = 25, /* float */
671 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
672 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
673 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
674 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
675 SVGA3D_TS_MAX
676} SVGA3dTextureStateName;
677
678typedef enum {
679 SVGA3D_TC_INVALID = 0,
680 SVGA3D_TC_DISABLE = 1,
681 SVGA3D_TC_SELECTARG1 = 2,
682 SVGA3D_TC_SELECTARG2 = 3,
683 SVGA3D_TC_MODULATE = 4,
684 SVGA3D_TC_ADD = 5,
685 SVGA3D_TC_ADDSIGNED = 6,
686 SVGA3D_TC_SUBTRACT = 7,
687 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
688 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
689 SVGA3D_TC_BLENDCURRENTALPHA = 10,
690 SVGA3D_TC_BLENDFACTORALPHA = 11,
691 SVGA3D_TC_MODULATE2X = 12,
692 SVGA3D_TC_MODULATE4X = 13,
693 SVGA3D_TC_DSDT = 14,
694 SVGA3D_TC_DOTPRODUCT3 = 15,
695 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
696 SVGA3D_TC_ADDSIGNED2X = 17,
697 SVGA3D_TC_ADDSMOOTH = 18,
698 SVGA3D_TC_PREMODULATE = 19,
699 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
700 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
701 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
702 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
703 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
704 SVGA3D_TC_MULTIPLYADD = 25,
705 SVGA3D_TC_LERP = 26,
706 SVGA3D_TC_MAX
707} SVGA3dTextureCombiner;
708
709#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
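
/*
 * Editor's note (illustrative): SVGA3D_TC_CAP_BIT maps combiner op N
 * (for N > 0) to capability bit N - 1, so for example
 * SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE) is 1 << 3. A hypothetical
 * capability word 'texOpCaps' would be tested as:
 *
 *    if (texOpCaps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE))
 *       ... the host supports the MODULATE combiner ...
 */
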
710
711typedef enum {
712 SVGA3D_TEX_ADDRESS_INVALID = 0,
713 SVGA3D_TEX_ADDRESS_WRAP = 1,
714 SVGA3D_TEX_ADDRESS_MIRROR = 2,
715 SVGA3D_TEX_ADDRESS_CLAMP = 3,
716 SVGA3D_TEX_ADDRESS_BORDER = 4,
717 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
718 SVGA3D_TEX_ADDRESS_EDGE = 6,
719 SVGA3D_TEX_ADDRESS_MAX
720} SVGA3dTextureAddress;
721
722/*
723 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
724 * disabled, and the rasterizer should use the magnification filter instead.
725 */
726typedef enum {
727 SVGA3D_TEX_FILTER_NONE = 0,
728 SVGA3D_TEX_FILTER_NEAREST = 1,
729 SVGA3D_TEX_FILTER_LINEAR = 2,
730 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
731 SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
732 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
733 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
734 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
735 SVGA3D_TEX_FILTER_MAX
736} SVGA3dTextureFilter;
737
738typedef enum {
739 SVGA3D_TEX_TRANSFORM_OFF = 0,
740 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
741 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
742 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
743 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
744 SVGA3D_TEX_PROJECTED = (1 << 15),
745} SVGA3dTexTransformFlags;
746
747typedef enum {
748 SVGA3D_TEXCOORD_GEN_OFF = 0,
749 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
750 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
751 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
752 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
753 SVGA3D_TEXCOORD_GEN_MAX
754} SVGA3dTextureCoordGen;
755
756/*
757 * Texture argument constants for texture combiner
758 */
759typedef enum {
760 SVGA3D_TA_INVALID = 0,
761 SVGA3D_TA_CONSTANT = 1,
762 SVGA3D_TA_PREVIOUS = 2,
763 SVGA3D_TA_DIFFUSE = 3,
764 SVGA3D_TA_TEXTURE = 4,
765 SVGA3D_TA_SPECULAR = 5,
766 SVGA3D_TA_MAX
767} SVGA3dTextureArgData;
768
769#define SVGA3D_TM_MASK_LEN 4
770
771/* Modifiers for texture argument constants defined above. */
772typedef enum {
773 SVGA3D_TM_NONE = 0,
774 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
775 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
776} SVGA3dTextureArgModifier;
777
778#define SVGA3D_INVALID_ID ((uint32)-1)
779#define SVGA3D_MAX_CLIP_PLANES 6
780
781/*
782 * This is the limit to the number of fixed-function texture
783 * transforms and texture coordinates we can support. It does *not*
784 * correspond to the number of texture image units (samplers) we
785 * support!
786 */
787#define SVGA3D_MAX_TEXTURE_COORDS 8
788
789/*
790 * Vertex declarations
791 *
792 * Notes:
793 *
794 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
795 * draw with any POSITIONT vertex arrays, the programmable vertex
796 * pipeline will be implicitly disabled. Drawing will take place as if
797 * no vertex shader was bound.
798 */
799
800typedef enum {
801 SVGA3D_DECLUSAGE_POSITION = 0,
802 SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
803 SVGA3D_DECLUSAGE_BLENDINDICES, // 2
804 SVGA3D_DECLUSAGE_NORMAL, // 3
805 SVGA3D_DECLUSAGE_PSIZE, // 4
806 SVGA3D_DECLUSAGE_TEXCOORD, // 5
807 SVGA3D_DECLUSAGE_TANGENT, // 6
808 SVGA3D_DECLUSAGE_BINORMAL, // 7
809 SVGA3D_DECLUSAGE_TESSFACTOR, // 8
810 SVGA3D_DECLUSAGE_POSITIONT, // 9
811 SVGA3D_DECLUSAGE_COLOR, // 10
812 SVGA3D_DECLUSAGE_FOG, // 11
813 SVGA3D_DECLUSAGE_DEPTH, // 12
814 SVGA3D_DECLUSAGE_SAMPLE, // 13
815 SVGA3D_DECLUSAGE_MAX
816} SVGA3dDeclUsage;
817
818typedef enum {
819 SVGA3D_DECLMETHOD_DEFAULT = 0,
820 SVGA3D_DECLMETHOD_PARTIALU,
821 SVGA3D_DECLMETHOD_PARTIALV,
822 SVGA3D_DECLMETHOD_CROSSUV, // Normal
823 SVGA3D_DECLMETHOD_UV,
824 SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
825 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
826} SVGA3dDeclMethod;
827
828typedef enum {
829 SVGA3D_DECLTYPE_FLOAT1 = 0,
830 SVGA3D_DECLTYPE_FLOAT2 = 1,
831 SVGA3D_DECLTYPE_FLOAT3 = 2,
832 SVGA3D_DECLTYPE_FLOAT4 = 3,
833 SVGA3D_DECLTYPE_D3DCOLOR = 4,
834 SVGA3D_DECLTYPE_UBYTE4 = 5,
835 SVGA3D_DECLTYPE_SHORT2 = 6,
836 SVGA3D_DECLTYPE_SHORT4 = 7,
837 SVGA3D_DECLTYPE_UBYTE4N = 8,
838 SVGA3D_DECLTYPE_SHORT2N = 9,
839 SVGA3D_DECLTYPE_SHORT4N = 10,
840 SVGA3D_DECLTYPE_USHORT2N = 11,
841 SVGA3D_DECLTYPE_USHORT4N = 12,
842 SVGA3D_DECLTYPE_UDEC3 = 13,
843 SVGA3D_DECLTYPE_DEC3N = 14,
844 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
845 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
846 SVGA3D_DECLTYPE_MAX,
847} SVGA3dDeclType;
848
849/*
850 * This structure is used for the divisor for geometry instancing;
851 * it's a direct translation of the Direct3D equivalent.
852 */
853typedef union {
854 struct {
855 /*
856 * For index data, this number represents the number of instances to draw.
857 * For instance data, this number represents the number of
858 * instances/vertex in this stream
859 */
860 uint32 count : 30;
861
862 /*
863 * This is 1 if this is supposed to be the data that is repeated for
864 * every instance.
865 */
866 uint32 indexedData : 1;
867
868 /*
869 * This is 1 if this is supposed to be the per-instance data.
870 */
871 uint32 instanceData : 1;
872 };
873
874 uint32 value;
875} SVGA3dVertexDivisor;
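
/*
 * Editor's sketch (illustrative): a plausible way to fill the divisors
 * for classic instanced drawing, following the field descriptions
 * above. The geometry stream is tagged indexedData with the number of
 * instances to draw; the per-instance stream is tagged instanceData
 * and advances once per instance.
 */
static inline void
svga3d_divisors_for_instancing(SVGA3dVertexDivisor *geometry,
                               SVGA3dVertexDivisor *perInstance,
                               uint32 numInstances)
{
   geometry->value = 0;
   geometry->count = numInstances;   /* number of instances to draw */
   geometry->indexedData = 1;        /* repeated for every instance */

   perInstance->value = 0;
   perInstance->count = 1;           /* one instance per element */
   perInstance->instanceData = 1;    /* per-instance data */
}
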
876
877typedef enum {
878 SVGA3D_PRIMITIVE_INVALID = 0,
879 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
880 SVGA3D_PRIMITIVE_POINTLIST = 2,
881 SVGA3D_PRIMITIVE_LINELIST = 3,
882 SVGA3D_PRIMITIVE_LINESTRIP = 4,
883 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
884 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
885 SVGA3D_PRIMITIVE_MAX
886} SVGA3dPrimitiveType;
887
888typedef enum {
889 SVGA3D_COORDINATE_INVALID = 0,
890 SVGA3D_COORDINATE_LEFTHANDED = 1,
891 SVGA3D_COORDINATE_RIGHTHANDED = 2,
892 SVGA3D_COORDINATE_MAX
893} SVGA3dCoordinateType;
894
895typedef enum {
896 SVGA3D_TRANSFORM_INVALID = 0,
897 SVGA3D_TRANSFORM_WORLD = 1,
898 SVGA3D_TRANSFORM_VIEW = 2,
899 SVGA3D_TRANSFORM_PROJECTION = 3,
900 SVGA3D_TRANSFORM_TEXTURE0 = 4,
901 SVGA3D_TRANSFORM_TEXTURE1 = 5,
902 SVGA3D_TRANSFORM_TEXTURE2 = 6,
903 SVGA3D_TRANSFORM_TEXTURE3 = 7,
904 SVGA3D_TRANSFORM_TEXTURE4 = 8,
905 SVGA3D_TRANSFORM_TEXTURE5 = 9,
906 SVGA3D_TRANSFORM_TEXTURE6 = 10,
907 SVGA3D_TRANSFORM_TEXTURE7 = 11,
908 SVGA3D_TRANSFORM_WORLD1 = 12,
909 SVGA3D_TRANSFORM_WORLD2 = 13,
910 SVGA3D_TRANSFORM_WORLD3 = 14,
911 SVGA3D_TRANSFORM_MAX
912} SVGA3dTransformType;
913
914typedef enum {
915 SVGA3D_LIGHTTYPE_INVALID = 0,
916 SVGA3D_LIGHTTYPE_POINT = 1,
917 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
918 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
919 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
920 SVGA3D_LIGHTTYPE_MAX
921} SVGA3dLightType;
922
923typedef enum {
924 SVGA3D_CUBEFACE_POSX = 0,
925 SVGA3D_CUBEFACE_NEGX = 1,
926 SVGA3D_CUBEFACE_POSY = 2,
927 SVGA3D_CUBEFACE_NEGY = 3,
928 SVGA3D_CUBEFACE_POSZ = 4,
929 SVGA3D_CUBEFACE_NEGZ = 5,
930} SVGA3dCubeFace;
931
932typedef enum {
933 SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
934 SVGA3D_SHADERTYPE_VS = 1,
935 SVGA3D_SHADERTYPE_PS = 2,
936 SVGA3D_SHADERTYPE_MAX
937} SVGA3dShaderType;
938
939typedef enum {
940 SVGA3D_CONST_TYPE_FLOAT = 0,
941 SVGA3D_CONST_TYPE_INT = 1,
942 SVGA3D_CONST_TYPE_BOOL = 2,
943} SVGA3dShaderConstType;
944
945#define SVGA3D_MAX_SURFACE_FACES 6
946
947typedef enum {
948 SVGA3D_STRETCH_BLT_POINT = 0,
949 SVGA3D_STRETCH_BLT_LINEAR = 1,
950 SVGA3D_STRETCH_BLT_MAX
951} SVGA3dStretchBltMode;
952
953typedef enum {
954 SVGA3D_QUERYTYPE_OCCLUSION = 0,
955 SVGA3D_QUERYTYPE_MAX
956} SVGA3dQueryType;
957
958typedef enum {
959 SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
960 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
961 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
962 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
963} SVGA3dQueryState;
964
965typedef enum {
966 SVGA3D_WRITE_HOST_VRAM = 1,
967 SVGA3D_READ_HOST_VRAM = 2,
968} SVGA3dTransferType;
969
970/*
971 * The maximum number of vertex arrays we're guaranteed to support in
972 * SVGA_3D_CMD_DRAW_PRIMITIVES.
973 */
974#define SVGA3D_MAX_VERTEX_ARRAYS 32
975
976/*
977 * Identifiers for commands in the command FIFO.
978 *
979 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
980 * the SVGA3D protocol and remain reserved; they should not be used in the
981 * future.
982 *
983 * IDs between 1040 and 1999 (inclusive) are available for use by the
984 * current SVGA3D protocol.
985 *
986 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
987 * and up.
988 */
989
990#define SVGA_3D_CMD_LEGACY_BASE 1000
991#define SVGA_3D_CMD_BASE 1040
992
993#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
994#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
995#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
996#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
997#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
998#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
999#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
1000#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
1001#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
1002#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
1003#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
1004#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
1005#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
1006#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
1007#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
1008#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
1009#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
1010#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
1011#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
1012#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
1013#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
1014#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
1015#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
1016#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
1017#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
1018#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
1019#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
1020#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
1021#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
1022#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
1023#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
1024
1025#define SVGA_3D_CMD_FUTURE_MAX 2000
1026
1027/*
1028 * Common substructures used in multiple FIFO commands:
1029 */
1030
1031typedef struct {
1032 union {
1033 struct {
1034 uint16 function; // SVGA3dFogFunction
1035 uint8 type; // SVGA3dFogType
1036 uint8 base; // SVGA3dFogBase
1037 };
1038 uint32 uintValue;
1039 };
1040} SVGA3dFogMode;
1041
1042/*
1043 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1044 * is a surface ID as well as face/mipmap indices.
1045 */
1046
1047typedef
1048struct SVGA3dSurfaceImageId {
1049 uint32 sid;
1050 uint32 face;
1051 uint32 mipmap;
1052} SVGA3dSurfaceImageId;
1053
1054typedef
1055struct SVGA3dGuestImage {
1056 SVGAGuestPtr ptr;
1057
1058 /*
1059 * A note on interpretation of pitch: This value of pitch is the
1060 * number of bytes between vertically adjacent image
1061 * blocks. Normally this is the number of bytes between the first
1062 * pixel of two adjacent scanlines. With compressed textures,
1063 * however, this may represent the number of bytes between
1064 * compression blocks rather than between rows of pixels.
1065 *
1066 * XXX: Compressed textures currently must be tightly packed in guest memory.
1067 *
1068 * If the image is 1-dimensional, pitch is ignored.
1069 *
1070 * If 'pitch' is zero, the SVGA3D device calculates a pitch value
1071 * assuming each row of blocks is tightly packed.
1072 */
1073 uint32 pitch;
1074} SVGA3dGuestImage;
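
/*
 * Editor's sketch (illustrative): for an uncompressed format a tightly
 * packed pitch is just width times bytes-per-pixel, which is what the
 * device assumes when pitch == 0. 'bytesPerPixel' is knowledge the
 * caller must supply; this header does not describe format sizes.
 */
static inline uint32
svga3d_tightly_packed_pitch(uint32 width, uint32 bytesPerPixel)
{
   return width * bytesPerPixel;
}
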
1075
1076
1077/*
1078 * FIFO command format definitions:
1079 */
1080
1081/*
1082 * The data size header following cmdNum for every 3d command
1083 */
1084typedef
1085struct {
1086 uint32 id;
1087 uint32 size;
1088} SVGA3dCmdHeader;
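
/*
 * Editor's sketch (illustrative): every 3D command in the FIFO is an
 * SVGA3dCmdHeader immediately followed by 'size' bytes of body. The
 * fifo_reserve()/fifo_commit() helpers below are hypothetical
 * stand-ins for whatever FIFO access mechanism the driver provides.
 *
 *    SVGA3dCmdHeader *header = fifo_reserve(sizeof *header + bodySize);
 *    header->id = SVGA_3D_CMD_CONTEXT_DEFINE;
 *    header->size = bodySize;
 *    memcpy(&header[1], body, bodySize);
 *    fifo_commit(sizeof *header + bodySize);
 */
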
1089
1090/*
1091 * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
1092 * optional mipmaps and cube faces.
1093 */
1094
1095typedef
1096struct {
1097 uint32 width;
1098 uint32 height;
1099 uint32 depth;
1100} SVGA3dSize;
1101
1102typedef enum {
1103 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
1104 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
1105 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
1106 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
1107 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
1108 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
1109 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
1110 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
1111 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
1112} SVGA3dSurfaceFlags;
1113
1114typedef
1115struct {
1116 uint32 numMipLevels;
1117} SVGA3dSurfaceFace;
1118
1119typedef
1120struct {
1121 uint32 sid;
1122 SVGA3dSurfaceFlags surfaceFlags;
1123 SVGA3dSurfaceFormat format;
1124 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1125 /*
1126 * Followed by an SVGA3dSize structure for each mip level in each face.
1127 *
1128 * A note on surface sizes: Sizes are always specified in pixels,
1129 * even if the true surface size is not a multiple of the minimum
1130 * block size of the surface's format. For example, a 3x3x1 DXT1
1131 * compressed texture would actually be stored as a 4x4x1 image in
1132 * memory.
1133 */
1134} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
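
/*
 * Editor's sketch (illustrative): filling the SVGA3dSize array that
 * follows SVGA3dCmdDefineSurface, assuming the conventional rule that
 * each successive mip level halves every dimension, rounding down but
 * never below 1 (the header itself does not spell out this rule).
 */
static inline void
svga3d_fill_mip_sizes(SVGA3dSize *sizes, SVGA3dSize base, uint32 numMipLevels)
{
   uint32 i;

   for (i = 0; i < numMipLevels; i++) {
      sizes[i] = base;
      base.width  = base.width  > 1 ? base.width  / 2 : 1;
      base.height = base.height > 1 ? base.height / 2 : 1;
      base.depth  = base.depth  > 1 ? base.depth  / 2 : 1;
   }
}
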
1135
1136typedef
1137struct {
1138 uint32 sid;
1139} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
1140
1141typedef
1142struct {
1143 uint32 cid;
1144} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
1145
1146typedef
1147struct {
1148 uint32 cid;
1149} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
1150
1151typedef
1152struct {
1153 uint32 cid;
1154 SVGA3dClearFlag clearFlag;
1155 uint32 color;
1156 float depth;
1157 uint32 stencil;
1158 /* Followed by variable number of SVGA3dRect structures */
1159} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
1160
1161typedef
1162struct SVGA3dCopyRect {
1163 uint32 x;
1164 uint32 y;
1165 uint32 w;
1166 uint32 h;
1167 uint32 srcx;
1168 uint32 srcy;
1169} SVGA3dCopyRect;
1170
1171typedef
1172struct SVGA3dCopyBox {
1173 uint32 x;
1174 uint32 y;
1175 uint32 z;
1176 uint32 w;
1177 uint32 h;
1178 uint32 d;
1179 uint32 srcx;
1180 uint32 srcy;
1181 uint32 srcz;
1182} SVGA3dCopyBox;
1183
1184typedef
1185struct {
1186 uint32 x;
1187 uint32 y;
1188 uint32 w;
1189 uint32 h;
1190} SVGA3dRect;
1191
1192typedef
1193struct {
1194 uint32 x;
1195 uint32 y;
1196 uint32 z;
1197 uint32 w;
1198 uint32 h;
1199 uint32 d;
1200} SVGA3dBox;
1201
1202typedef
1203struct {
1204 uint32 x;
1205 uint32 y;
1206 uint32 z;
1207} SVGA3dPoint;
1208
1209typedef
1210struct {
1211 SVGA3dLightType type;
1212 SVGA3dBool inWorldSpace;
1213 float diffuse[4];
1214 float specular[4];
1215 float ambient[4];
1216 float position[4];
1217 float direction[4];
1218 float range;
1219 float falloff;
1220 float attenuation0;
1221 float attenuation1;
1222 float attenuation2;
1223 float theta;
1224 float phi;
1225} SVGA3dLightData;
1226
1227typedef
1228struct {
1229 uint32 sid;
1230 /* Followed by variable number of SVGA3dCopyRect structures */
1231} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
1232
1233typedef
1234struct {
1235 SVGA3dRenderStateName state;
1236 union {
1237 uint32 uintValue;
1238 float floatValue;
1239 };
1240} SVGA3dRenderState;
1241
1242typedef
1243struct {
1244 uint32 cid;
1245 /* Followed by variable number of SVGA3dRenderState structures */
1246} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
1247
1248typedef
1249struct {
1250 uint32 cid;
1251 SVGA3dRenderTargetType type;
1252 SVGA3dSurfaceImageId target;
1253} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
1254
1255typedef
1256struct {
1257 SVGA3dSurfaceImageId src;
1258 SVGA3dSurfaceImageId dest;
1259 /* Followed by variable number of SVGA3dCopyBox structures */
1260} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
1261
1262typedef
1263struct {
1264 SVGA3dSurfaceImageId src;
1265 SVGA3dSurfaceImageId dest;
1266 SVGA3dBox boxSrc;
1267 SVGA3dBox boxDest;
1268 SVGA3dStretchBltMode mode;
1269} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
1270
1271typedef
1272struct {
1273 /*
1274 * If the discard flag is present in a surface DMA operation, the host may
1275 * discard the contents of the current mipmap level and face of the target
1276 * surface before applying the surface DMA contents.
1277 */
1278 uint32 discard : 1;
1279
1280 /*
1281 * If the unsynchronized flag is present, the host may perform this upload
1282 * without syncing to pending reads on this surface.
1283 */
1284 uint32 unsynchronized : 1;
1285
1286 /*
1287 * Guests *MUST* set the reserved bits to 0 before submitting the command
1288 * suffix as future flags may occupy these bits.
1289 */
1290 uint32 reserved : 30;
1291} SVGA3dSurfaceDMAFlags;
1292
1293typedef
1294struct {
1295 SVGA3dGuestImage guest;
1296 SVGA3dSurfaceImageId host;
1297 SVGA3dTransferType transfer;
1298 /*
1299 * Followed by variable number of SVGA3dCopyBox structures. For consistency
1300 * in all clipping logic and coordinate translation, we define the
1301 * "source" in each copyBox as the guest image and the
1302 * "destination" as the host image, regardless of transfer
1303 * direction.
1304 *
1305 * For efficiency, the SVGA3D device is free to copy more data than
1306 * specified. For example, it may round copy boxes outwards such
1307 * that they lie on particular alignment boundaries.
1308 */
1309} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
1310
1311/*
1312 * SVGA3dCmdSurfaceDMASuffix --
1313 *
1314 * This is a command suffix that will appear after a SurfaceDMA command in
1315 * the FIFO. It contains some extra information that hosts may use to
1316 * optimize performance or protect the guest. This suffix exists to preserve
1317 * backwards compatibility while also allowing for new functionality to be
1318 * implemented.
1319 */
1320
1321typedef
1322struct {
1323 uint32 suffixSize;
1324
1325 /*
1326 * The maximum offset is used to determine the maximum offset from the
1327 * guestPtr base address that will be accessed or written to during this
1328 * surfaceDMA. If the suffix is supported, the host will respect this
1329 * boundary while performing surface DMAs.
1330 *
1331 * Defaults to MAX_UINT32
1332 */
1333 uint32 maximumOffset;
1334
1335 /*
1336 * A set of flags that describes optimizations that the host may perform
1337 * while performing this surface DMA operation. For correctness, the guest
1338 * must never rely on behaviour that differs when these flags are set.
1339 *
1340 * Defaults to 0
1341 */
1342 SVGA3dSurfaceDMAFlags flags;
1343} SVGA3dCmdSurfaceDMASuffix;
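
/*
 * Editor's sketch (illustrative): a guest that wants the documented
 * default behaviour would append a suffix like this after the copy
 * boxes of a SURFACE_DMA command. The MAX_UINT32 default is written
 * out explicitly because this header does not define that constant.
 */
static inline void
svga3d_init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   suffix->suffixSize = sizeof *suffix;
   suffix->maximumOffset = 0xFFFFFFFFu;  /* default: no extra bound */
   suffix->flags.discard = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved = 0;           /* reserved bits MUST be zero */
}
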
1344
1345/*
1346 * SVGA_3D_CMD_DRAW_PRIMITIVES --
1347 *
1348 * This command is the SVGA3D device's generic drawing entry point.
1349 * It can draw multiple ranges of primitives, optionally using an
1350 * index buffer, using an arbitrary collection of vertex buffers.
1351 *
1352 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
1353 * during this draw call. The declarations specify which surface
1354 * the vertex data lives in, what that vertex data is used for,
1355 * and how to interpret it.
1356 *
1357 * Each SVGA3dPrimitiveRange defines a collection of primitives
1358 * to render using the same vertex arrays. An index buffer is
1359 * optional.
1360 */
1361
1362typedef
1363struct {
1364 /*
1365 * A range hint is an optional specification for the range of indices
1366 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
1367 * that the entire array will be used.
1368 *
1369 * These are only hints. The SVGA3D device may use them for
1370 * performance optimization if possible, but it's also allowed to
1371 * ignore these values.
1372 */
1373 uint32 first;
1374 uint32 last;
1375} SVGA3dArrayRangeHint;
1376
1377typedef
1378struct {
1379 /*
1380 * Define the origin and shape of a vertex or index array. Both
1381 * 'offset' and 'stride' are in bytes. The provided surface will be
1382 * reinterpreted as a flat array of bytes in the same format used
1383 * by surface DMA operations. To avoid unnecessary conversions, the
1384 * surface should be created with the SVGA3D_BUFFER format.
1385 *
1386 * Index 0 in the array starts 'offset' bytes into the surface.
1387 * Index 1 begins at byte 'offset + stride', etc. Array indices may
1388 * not be negative.
1389 */
1390 uint32 surfaceId;
1391 uint32 offset;
1392 uint32 stride;
1393} SVGA3dArray;
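
/*
 * Editor's note (illustrative): element i of an SVGA3dArray therefore
 * begins at byte (offset + i * stride) within the backing surface:
 *
 *    uint32 elementStart = array.offset + i * array.stride;
 */
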
1394
1395typedef
1396struct {
1397 /*
1398 * Describe a vertex array's data type, and define how it is to be
1399 * used by the fixed function pipeline or the vertex shader. It
1400 * isn't useful to have two VertexDecls with the same
1401 * VertexArrayIdentity in one draw call.
1402 */
1403 SVGA3dDeclType type;
1404 SVGA3dDeclMethod method;
1405 SVGA3dDeclUsage usage;
1406 uint32 usageIndex;
1407} SVGA3dVertexArrayIdentity;
1408
1409typedef
1410struct {
1411 SVGA3dVertexArrayIdentity identity;
1412 SVGA3dArray array;
1413 SVGA3dArrayRangeHint rangeHint;
1414} SVGA3dVertexDecl;
1415
1416typedef
1417struct {
1418 /*
1419 * Define a group of primitives to render, from sequential indices.
1420 *
1421 * The values of 'primType' and 'primitiveCount' imply the
1422 * total number of vertices that will be rendered.
1423 */
1424 SVGA3dPrimitiveType primType;
1425 uint32 primitiveCount;
1426
1427 /*
1428 * Optional index buffer. If indexArray.surfaceId is
1429 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
1430 * without an index buffer is identical to rendering with an index
1431 * buffer containing the sequence [0, 1, 2, 3, ...].
1432 *
1433 * If an index buffer is in use, indexWidth specifies the width in
1434 * bytes of each index value. It must be less than or equal to
1435 * indexArray.stride.
1436 *
1437 * (Currently, the SVGA3D device requires index buffers to be tightly
1438 * packed. In other words, indexWidth == indexArray.stride)
1439 */
1440 SVGA3dArray indexArray;
1441 uint32 indexWidth;
1442
1443 /*
1444 * Optional index bias. This number is added to all indices from
1445 * indexArray before they are used as vertex array indices. This
1446 * can be used in multiple ways:
1447 *
1448 * - When not using an indexArray, this bias can be used to
1449 * specify where in the vertex arrays to begin rendering.
1450 *
1451 * - A positive number here is equivalent to increasing the
1452 * offset in each vertex array.
1453 *
1454 * - A negative number can be used to render using a small
1455 * vertex array and an index buffer that contains large
1456 * values. This may be used by some applications that
1457 * crop a vertex buffer without modifying their index
1458 * buffer.
1459 *
1460 * Note that rendering with a negative bias value may be slower and
1461 * use more memory than rendering with a positive or zero bias.
1462 */
1463 int32 indexBias;
1464} SVGA3dPrimitiveRange;
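
/*
 * Editor's sketch (illustrative): the number of vertices implied by
 * 'primType' and 'primitiveCount' follows the usual list/strip/fan
 * rules:
 */
static inline uint32
svga3d_vertex_count(SVGA3dPrimitiveType type, uint32 primitiveCount)
{
   switch (type) {
   case SVGA3D_PRIMITIVE_POINTLIST:     return primitiveCount;
   case SVGA3D_PRIMITIVE_LINELIST:      return primitiveCount * 2;
   case SVGA3D_PRIMITIVE_LINESTRIP:     return primitiveCount + 1;
   case SVGA3D_PRIMITIVE_TRIANGLELIST:  return primitiveCount * 3;
   case SVGA3D_PRIMITIVE_TRIANGLESTRIP: return primitiveCount + 2;
   case SVGA3D_PRIMITIVE_TRIANGLEFAN:   return primitiveCount + 2;
   default:                             return 0;
   }
}
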
1465
1466typedef
1467struct {
1468 uint32 cid;
1469 uint32 numVertexDecls;
1470 uint32 numRanges;
1471
1472 /*
1473 * There are two variable size arrays after the
1474 * SVGA3dCmdDrawPrimitives structure. In order,
1475 * they are:
1476 *
1477 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
1478 * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
1479 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
1480 * the frequency divisor for the corresponding vertex decl)
1481 */
1482} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
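
/*
 * Editor's note (illustrative): the body size of a DRAW_PRIMITIVES
 * command (the value placed in SVGA3dCmdHeader::size) is therefore:
 *
 *    sizeof(SVGA3dCmdDrawPrimitives)
 *       + numVertexDecls * sizeof(SVGA3dVertexDecl)
 *       + numRanges * sizeof(SVGA3dPrimitiveRange)
 *       + (withDivisors ? numVertexDecls * sizeof(SVGA3dVertexDivisor) : 0)
 *
 * where 'withDivisors' reflects whether the optional divisor array is
 * appended.
 */
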
1483
1484typedef
1485struct {
1486 uint32 stage;
1487 SVGA3dTextureStateName name;
1488 union {
1489 uint32 value;
1490 float floatValue;
1491 };
1492} SVGA3dTextureState;
1493
1494typedef
1495struct {
1496 uint32 cid;
1497 /* Followed by variable number of SVGA3dTextureState structures */
1498} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
1499
1500typedef
1501struct {
1502 uint32 cid;
1503 SVGA3dTransformType type;
1504 float matrix[16];
1505} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
1506
1507typedef
1508struct {
1509 float min;
1510 float max;
1511} SVGA3dZRange;
1512
1513typedef
1514struct {
1515 uint32 cid;
1516 SVGA3dZRange zRange;
1517} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
1518
1519typedef
1520struct {
1521 float diffuse[4];
1522 float ambient[4];
1523 float specular[4];
1524 float emissive[4];
1525 float shininess;
1526} SVGA3dMaterial;
1527
1528typedef
1529struct {
1530 uint32 cid;
1531 SVGA3dFace face;
1532 SVGA3dMaterial material;
1533} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
1534
1535typedef
1536struct {
1537 uint32 cid;
1538 uint32 index;
1539 SVGA3dLightData data;
1540} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
1541
1542typedef
1543struct {
1544 uint32 cid;
1545 uint32 index;
1546 uint32 enabled;
1547} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
1548
1549typedef
1550struct {
1551 uint32 cid;
1552 SVGA3dRect rect;
1553} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
1554
1555typedef
1556struct {
1557 uint32 cid;
1558 SVGA3dRect rect;
1559} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
1560
1561typedef
1562struct {
1563 uint32 cid;
1564 uint32 index;
1565 float plane[4];
1566} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
1567
1568typedef
1569struct {
1570 uint32 cid;
1571 uint32 shid;
1572 SVGA3dShaderType type;
1573 /* Followed by variable number of DWORDs for shader bytecode */
1574} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
1575
1576typedef
1577struct {
1578 uint32 cid;
1579 uint32 shid;
1580 SVGA3dShaderType type;
1581} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
1582
1583typedef
1584struct {
1585 uint32 cid;
1586 uint32 reg; /* register number */
1587 SVGA3dShaderType type;
1588 SVGA3dShaderConstType ctype;
1589 uint32 values[4];
1590} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
1591
1592typedef
1593struct {
1594 uint32 cid;
1595 SVGA3dShaderType type;
1596 uint32 shid;
1597} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
1598
1599typedef
1600struct {
1601 uint32 cid;
1602 SVGA3dQueryType type;
1603} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1604
1605typedef
1606struct {
1607 uint32 cid;
1608 SVGA3dQueryType type;
1609 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1610} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1611
1612typedef
1613struct {
1614 uint32 cid; /* Same parameters passed to END_QUERY */
1615 SVGA3dQueryType type;
1616 SVGAGuestPtr guestResult;
1617} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1618
1619typedef
1620struct {
1621 uint32 totalSize; /* Set by guest before query is ended. */
1622 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1623 union { /* Set by host on exit from PENDING state */
1624 uint32 result32;
1625 };
1626} SVGA3dQueryResult;
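
/*
 * Editor's sketch (illustrative): a guest drives an occlusion query by
 * initializing the result structure in guest memory, issuing
 * BEGIN_QUERY and END_QUERY, and then polling 'state' until the host
 * moves it out of PENDING. Command emission is elided; see the
 * structures above.
 *
 *    SVGA3dQueryResult *result = ...;      // guest memory, referenced
 *                                          // by guestResult
 *    result->totalSize = sizeof *result;   // set before ending the query
 *    result->state = SVGA3D_QUERYSTATE_PENDING;
 *    ... emit SVGA_3D_CMD_BEGIN_QUERY, draws, SVGA_3D_CMD_END_QUERY ...
 *    while (result->state == SVGA3D_QUERYSTATE_PENDING)
 *       ;                                  // or sleep/sync; the host
 *                                          // sets SUCCEEDED or FAILED
 */
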
1627
1628/*
1629 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1630 *
1631 * This is a blit from an SVGA3D surface to a Screen Object. Just
1632 * like GMR-to-screen blits, this blit may be directed at a
1633 * specific screen or to the virtual coordinate space.
1634 *
1635 * The blit copies from a rectangular region of an SVGA3D surface
1636 * image to a rectangular region of a screen or screens.
1637 *
1638 * This command takes an optional variable-length list of clipping
1639 * rectangles after the body of the command. If no rectangles are
1640 * specified, there is no clipping region. The entire destRect is
1641 * drawn to. If one or more rectangles are included, they describe
1642 * a clipping region. The clip rectangle coordinates are measured
1643 * relative to the top-left corner of destRect.
1644 *
1645 * This clipping region serves multiple purposes:
1646 *
1647 * - It can be used to perform an irregularly shaped blit more
1648 * efficiently than by issuing many separate blit commands.
1649 *
1650 * - It is equivalent to allowing blits with non-integer
1651 * source coordinates. You could blit just one half-pixel
1652 * of a source, for example, by specifying a larger
1653 * destination rectangle than you need, then removing
1654 * part of it using a clip rectangle.
1655 *
1656 * Availability:
1657 * SVGA_FIFO_CAP_SCREEN_OBJECT
1658 *
1659 * Limitations:
1660 *
1661 * - Currently, no backend supports blits from a mipmap or face
1662 * other than the first one.
1663 */
1664
1665typedef
1666struct {
1667 SVGA3dSurfaceImageId srcImage;
1668 SVGASignedRect srcRect;
1669 uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
1670   SVGASignedRect destRect;     /* Supports scaling if src/dest differ in size */
1671 /* Clipping: zero or more SVGASignedRects follow */
1672} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
1673
1674
1675/*
1676 * Capability query index.
1677 *
1678 * Notes:
1679 *
1680 * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
1681 * fixed-function texture units available. Each of these units
1682 * work in both FFP and Shader modes, and they support texture
1683 * transforms and texture coordinates. The host may have additional
1684 * texture image units that are only usable with shaders.
1685 *
1686 * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
1687 * return TRUE. Even on physical hardware that does not support
1688 * these formats natively, the SVGA3D device will provide an emulation
1689 * which should be invisible to the guest OS.
1690 *
1691 * In general, the SVGA3D device should support any operation on
1692 * any surface format; it just may perform some of these
1693 * operations in software depending on the capabilities of the
1694 * available physical hardware.
1695 *
1696 * XXX: In the future, we will add capabilities that describe in
1697 * detail what formats are supported in hardware for what kinds
1698 * of operations.
1699 */
1700
1701typedef enum {
1702 SVGA3D_DEVCAP_3D = 0,
1703 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
1704 SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
1705 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
1706 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
1707 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
1708 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
1709 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
1710 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
1711 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
1712 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
1713 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
1714 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
1715 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
1716 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
1717 SVGA3D_DEVCAP_QUERY_TYPES = 15,
1718 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
1719 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
1720 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
1721 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
1722 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
1723 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
1724 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
1725 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
1726 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
1727 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
1728 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
1729 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
1730 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
1731 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
1732 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
1733 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
1734 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
1735 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
1736 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
1737 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
1738 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
1739 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
1740 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
1741 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
1742 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
1743 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
1744 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
1745 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
1746 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
1747 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
1748 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
1749 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
1750 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
1751 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
1752 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
1753 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
1754 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
1755 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
1756 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
1757 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
1758 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
1759 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
1760 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
1761 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
1762 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
1763 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
1764 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
1765
1766 /*
1767 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
1768 * render targets. This does not include the depth or stencil targets.
1769 */
1770 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
1771
1772 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
1773 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
1774 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
1775 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
1776 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
1777
1778 /*
1779 * Don't add new caps into the previous section; the values in this
1780 * enumeration must not change. You can put new values right before
1781 * SVGA3D_DEVCAP_MAX.
1782 */
1783 SVGA3D_DEVCAP_MAX /* This must be the last index. */
1784} SVGA3dDevCapIndex;
1785
1786typedef union {
1787 Bool b;
1788 uint32 u;
1789 int32 i;
1790 float f;
1791} SVGA3dDevCapResult;
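/*
 * The union is interpreted according to the queried index: boolean
 * capabilities such as SVGA3D_DEVCAP_3D use .b, limits use .u or .i,
 * and SVGA3D_DEVCAP_MAX_POINT_SIZE is a float. A sketch, assuming a
 * hypothetical query_devcap() that fetches the raw result word (on
 * this device generation the data is published through the
 * SVGA_FIFO_3D_CAPS block; see svga_reg.h):
 */
extern uint32 query_devcap(uint32 index);    /* hypothetical */

Bool
device_supports_3d(void)
{
   SVGA3dDevCapResult result;

   result.u = query_devcap(SVGA3D_DEVCAP_3D);
   return result.b;                          /* boolean capability */
}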
1792
1793#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 000000000000..7b85e9b8c854
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_escape.h --
28 *
29 * Definitions for our own (vendor-specific) SVGA Escape commands.
30 */
31
32#ifndef _SVGA_ESCAPE_H_
33#define _SVGA_ESCAPE_H_
34
35
36/*
37 * Namespace IDs for the escape command
38 */
39
40#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
41#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
42
43
44/*
45 * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
46 * the first DWORD of escape data (after the nsID and size). As a
47 * guideline we're using the high word and low word as a major and
48 * minor command number, respectively.
49 *
50 * Major command number allocation:
51 *
52 * 0000: Reserved
53 * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
54 * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
55 * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
56 */
57
58#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
59
60
61/*
62 * SVGA Hint commands.
63 *
64 * These escapes let the SVGA driver provide optional information to
65 * the host about the state of the guest or guest applications. The
66 * host can use these hints to make user interface or performance
67 * decisions.
68 *
69 * Notes:
70 *
71 * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
72 * that use the SVGA Screen Object extension. Instead of sending
73 * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
74 * Screen Object.
75 */
76
77#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
78#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
79
80typedef
81struct {
82 uint32 command;
83 uint32 fullscreen;
84 struct {
85 int32 x, y;
86 } monitorPosition;
87} SVGAEscapeHintFullscreen;
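/*
 * A sketch of sending this (deprecated) hint, assuming a hypothetical
 * fifo_send_escape(nsid, data, size) helper that wraps the payload in
 * an SVGA_CMD_ESCAPE FIFO command (see svga_reg.h):
 */
extern void fifo_send_escape(uint32 nsid, const void *data, uint32 size);

void
send_fullscreen_hint(uint32 fullscreen, int32 x, int32 y)
{
   SVGAEscapeHintFullscreen hint;

   hint.command = SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN;
   hint.fullscreen = fullscreen;
   hint.monitorPosition.x = x;
   hint.monitorPosition.y = y;

   fifo_send_escape(SVGA_ESCAPE_NSID_VMWARE, &hint, sizeof hint);
}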
88
89#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 000000000000..f753d73c14b4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_overlay.h --
28 *
29 * Definitions for video-overlay support.
30 */
31
32#ifndef _SVGA_OVERLAY_H_
33#define _SVGA_OVERLAY_H_
34
35#include "svga_reg.h"
36
37/*
38 * Video formats we support
39 */
40
41#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
42#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
43#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
44
45typedef enum {
46 SVGA_OVERLAY_FORMAT_INVALID = 0,
47 SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
48 SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
49 SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
50} SVGAOverlayFormat;
51
52#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
53
54#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
55
56#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
57 /* FIFO escape layout:
58 * Type, Stream Id, (Register Id, Value) pairs */
59
60#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
61 /* FIFO escape layout:
62 * Type, Stream Id */
63
64typedef
65struct SVGAEscapeVideoSetRegs {
66 struct {
67 uint32 cmdType;
68 uint32 streamId;
69 } header;
70
71 // May include zero or more items.
72 struct {
73 uint32 registerId;
74 uint32 value;
75 } items[1];
76} SVGAEscapeVideoSetRegs;
77
78typedef
79struct SVGAEscapeVideoFlush {
80 uint32 cmdType;
81 uint32 streamId;
82} SVGAEscapeVideoFlush;
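/*
 * A sketch of programming overlay registers with a SET_REGS escape,
 * reusing the hypothetical fifo_send_escape() helper from the
 * svga_escape.h sketch. Since items[] is variable-length, room for a
 * second (registerId, value) pair is reserved right after the struct:
 */
void
overlay_enable_with_colorkey(uint32 streamId)
{
   struct {
      SVGAEscapeVideoSetRegs setRegs;
      uint32 extraPair[2];               /* second (registerId, value) */
   } cmd;

   cmd.setRegs.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
   cmd.setRegs.header.streamId = streamId;
   cmd.setRegs.items[0].registerId = SVGA_VIDEO_ENABLED;
   cmd.setRegs.items[0].value = 1;
   cmd.extraPair[0] = SVGA_VIDEO_FLAGS;
   cmd.extraPair[1] = SVGA_VIDEO_FLAG_COLORKEY;

   fifo_send_escape(SVGA_ESCAPE_NSID_VMWARE, &cmd, sizeof cmd);
}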
83
84
85/*
86 * Struct definitions for the video overlay commands built on
87 * SVGAFifoCmdEscape.
88 */
89typedef
90struct {
91 uint32 command;
92 uint32 overlay;
93} SVGAFifoEscapeCmdVideoBase;
94
95typedef
96struct {
97 SVGAFifoEscapeCmdVideoBase videoCmd;
98} SVGAFifoEscapeCmdVideoFlush;
99
100typedef
101struct {
102 SVGAFifoEscapeCmdVideoBase videoCmd;
103 struct {
104 uint32 regId;
105 uint32 value;
106 } items[1];
107} SVGAFifoEscapeCmdVideoSetRegs;
108
109typedef
110struct {
111 SVGAFifoEscapeCmdVideoBase videoCmd;
112 struct {
113 uint32 regId;
114 uint32 value;
115 } items[SVGA_VIDEO_NUM_REGS];
116} SVGAFifoEscapeCmdVideoSetAllRegs;
117
118
119/*
120 *----------------------------------------------------------------------
121 *
122 * VMwareVideoGetAttributes --
123 *
124 * Computes the size, pitches and offsets for YUV frames.
125 *
126 * Results:
127 * TRUE on success; otherwise FALSE on failure.
128 *
129 * Side effects:
130 * Pitches and offsets for the given YUV frame are put in 'pitches'
131 * and 'offsets' respectively. Both pointers are optional, though.
132 *
133 *----------------------------------------------------------------------
134 */
135
136static inline bool
137VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
138 uint32 *width, // IN / OUT
139 uint32 *height, // IN / OUT
140 uint32 *size, // OUT
141 uint32 *pitches, // OUT (optional)
142 uint32 *offsets) // OUT (optional)
143{
144 int tmp;
145
146 *width = (*width + 1) & ~1;
147
148 if (offsets) {
149 offsets[0] = 0;
150 }
151
152 switch (format) {
153 case VMWARE_FOURCC_YV12:
154 *height = (*height + 1) & ~1;
155 *size = (*width + 3) & ~3;
156
157 if (pitches) {
158 pitches[0] = *size;
159 }
160
161 *size *= *height;
162
163 if (offsets) {
164 offsets[1] = *size;
165 }
166
167 tmp = ((*width >> 1) + 3) & ~3;
168
169 if (pitches) {
170 pitches[1] = pitches[2] = tmp;
171 }
172
173 tmp *= (*height >> 1);
174 *size += tmp;
175
176 if (offsets) {
177 offsets[2] = *size;
178 }
179
180 *size += tmp;
181 break;
182
183 case VMWARE_FOURCC_YUY2:
184 case VMWARE_FOURCC_UYVY:
185 *size = *width * 2;
186
187 if (pitches) {
188 pitches[0] = *size;
189 }
190
191 *size *= *height;
192 break;
193
194 default:
195 return false;
196 }
197
198 return true;
199}
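/*
 * A usage sketch: computing the buffer layout of a 320x240 YV12 frame.
 */
void
yv12_layout_example(void)
{
   uint32 width = 320, height = 240, size;
   uint32 pitches[3], offsets[3];

   if (VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                &width, &height, &size,
                                pitches, offsets)) {
      /* pitches == {320, 160, 160}, offsets == {0, 76800, 96000},
       * size == 320*240 + 2 * (160 * 120) == 115200 bytes. */
   }
}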
200
201#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 000000000000..1b96c2ec07dd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_reg.h --
28 *
29 * Virtual hardware definitions for the VMware SVGA II device.
30 */
31
32#ifndef _SVGA_REG_H_
33#define _SVGA_REG_H_
34
35/*
36 * PCI device IDs.
37 */
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
40
41/*
42 * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
43 * cursor bypass mode. This is still supported, but no new guest
44 * drivers should use it.
45 */
46#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
47#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
48#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
49#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
50
51/*
52 * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
53 * The changeMap in the monitor is proportional to this number. Therefore, we'd
54 * like to keep it as small as possible to reduce monitor overhead (using
55 * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
56 * 4k!).
57 *
58 * NB: For compatibility reasons, this value must be greater than 0xff0000.
59 * See bug 335072.
60 */
61#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
62
63#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
64#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
65#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
66
67#define SVGA_MAGIC 0x900000UL
68#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
69
70/* Version 2 let the address of the frame buffer be unsigned on Win32 */
71#define SVGA_VERSION_2 2
72#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
73
74/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
75 PALETTE_BASE has moved */
76#define SVGA_VERSION_1 1
77#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
78
79/* Version 0 is the initial version */
80#define SVGA_VERSION_0 0
81#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
82
83/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
84#define SVGA_ID_INVALID 0xFFFFFFFF
85
86/* Port offsets, relative to BAR0 */
87#define SVGA_INDEX_PORT 0x0
88#define SVGA_VALUE_PORT 0x1
89#define SVGA_BIOS_PORT 0x2
90#define SVGA_IRQSTATUS_PORT 0x8
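/*
 * Registers are reached indirectly: write a register index to
 * SVGA_INDEX_PORT, then access the 32-bit SVGA_VALUE_PORT (the value
 * port really does sit at byte offset 1; the device accepts the
 * unaligned access). A sketch for Linux userspace x86 port I/O,
 * assuming a hypothetical io_base discovered during PCI probing and
 * ioperm()/iopl() privileges:
 */
#include <sys/io.h>                    /* outl(), inl() */

unsigned short io_base;                /* BAR0 I/O base (assumption) */

uint32
reg_read(uint32 index)
{
   outl(index, io_base + SVGA_INDEX_PORT);
   return inl(io_base + SVGA_VALUE_PORT);
}

void
reg_write(uint32 index, uint32 value)
{
   outl(index, io_base + SVGA_INDEX_PORT);
   outl(value, io_base + SVGA_VALUE_PORT);
}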
91
92/*
93 * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
94 *
95 * Interrupts are only supported when the
96 * SVGA_CAP_IRQMASK capability is present.
97 */
98#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
99#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
100#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
101
102/*
103 * Registers
104 */
105
106enum {
107 SVGA_REG_ID = 0,
108 SVGA_REG_ENABLE = 1,
109 SVGA_REG_WIDTH = 2,
110 SVGA_REG_HEIGHT = 3,
111 SVGA_REG_MAX_WIDTH = 4,
112 SVGA_REG_MAX_HEIGHT = 5,
113 SVGA_REG_DEPTH = 6,
114 SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
115 SVGA_REG_PSEUDOCOLOR = 8,
116 SVGA_REG_RED_MASK = 9,
117 SVGA_REG_GREEN_MASK = 10,
118 SVGA_REG_BLUE_MASK = 11,
119 SVGA_REG_BYTES_PER_LINE = 12,
120 SVGA_REG_FB_START = 13, /* (Deprecated) */
121 SVGA_REG_FB_OFFSET = 14,
122 SVGA_REG_VRAM_SIZE = 15,
123 SVGA_REG_FB_SIZE = 16,
124
125 /* ID 0 implementation only had the above registers, then the palette */
126
127 SVGA_REG_CAPABILITIES = 17,
128 SVGA_REG_MEM_START = 18, /* (Deprecated) */
129 SVGA_REG_MEM_SIZE = 19,
130 SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
131 SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
132 SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
133 SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
134 SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
135 SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
136 SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
137 SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
138 SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
139 SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
140 SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
141 SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
142 SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
143 SVGA_REG_IRQMASK = 33, /* Interrupt mask */
144
145 /* Legacy multi-monitor support */
146 SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
147 SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
148 SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
149 SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
150 SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
151 SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
152 SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
153
154 /* See "Guest memory regions" below. */
155 SVGA_REG_GMR_ID = 41,
156 SVGA_REG_GMR_DESCRIPTOR = 42,
157 SVGA_REG_GMR_MAX_IDS = 43,
158 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
159
160 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
161 SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
162
163 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
164 /* Next 768 (== 256*3) registers exist for colormap */
165
166 SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
167 /* Base of scratch registers */
168 /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
169 First 4 are reserved for VESA BIOS Extension; any remaining are for
170 the use of the current SVGA driver. */
171};
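/*
 * With the reg_read()/reg_write() accessors sketched above, a driver
 * negotiates the device version by writing the newest ID it supports
 * to SVGA_REG_ID and reading it back, stepping down until the device
 * agrees (a minimal sketch):
 */
uint32
negotiate_svga_id(void)
{
   uint32 id;

   for (id = SVGA_ID_2; id >= SVGA_ID_0; id--) {
      reg_write(SVGA_REG_ID, id);
      if (reg_read(SVGA_REG_ID) == id)
         return id;                    /* device accepted this version */
   }
   return SVGA_ID_INVALID;             /* no mutually supported version */
}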
172
173
174/*
175 * Guest memory regions (GMRs):
176 *
177 * This is a new memory mapping feature available in SVGA devices
178 * which have the SVGA_CAP_GMR bit set. Previously, there were two
179 * fixed memory regions available with which to share data between the
180 * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
181 * are our name for an extensible way of providing arbitrary DMA
182 * buffers for use between the driver and the SVGA device. They are a
183 * new alternative to framebuffer memory, usable for both 2D and 3D
184 * graphics operations.
185 *
186 * Since GMR mapping must be done synchronously with guest CPU
187 * execution, we use a new pair of SVGA registers:
188 *
189 * SVGA_REG_GMR_ID --
190 *
191 * Read/write.
192 * This register holds the 32-bit ID (a small positive integer)
193 * of a GMR to create, delete, or redefine. Writing this register
194 * has no side-effects.
195 *
196 * SVGA_REG_GMR_DESCRIPTOR --
197 *
198 * Write-only.
199 * Writing this register will create, delete, or redefine the GMR
200 * specified by the above ID register. If this register is zero,
201 * the GMR is deleted. Any pointers into this GMR (including those
202 * currently being processed by FIFO commands) will be
203 * synchronously invalidated.
204 *
205 * If this register is nonzero, it must be the physical page
206 * number (PPN) of a data structure which describes the physical
207 * layout of the memory region this GMR should describe. The
208 * descriptor structure will be read synchronously by the SVGA
209 * device when this register is written. The descriptor need not
210 * remain allocated for the lifetime of the GMR.
211 *
212 * The guest driver should write SVGA_REG_GMR_ID first, then
213 * SVGA_REG_GMR_DESCRIPTOR.
214 *
215 * SVGA_REG_GMR_MAX_IDS --
216 *
217 * Read-only.
218 * The SVGA device may choose to support a maximum number of
219 * user-defined GMR IDs. This register holds the number of supported
220 * IDs. (The maximum supported ID plus 1)
221 *
222 * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
223 *
224 * Read-only.
225 * The SVGA device may choose to put a limit on the total number
226 * of SVGAGuestMemDescriptor structures it will read when defining
227 * a single GMR.
228 *
229 * The descriptor structure is an array of SVGAGuestMemDescriptor
230 * structures. Each structure may do one of three things:
231 *
232 * - Terminate the GMR descriptor list.
233 * (ppn==0, numPages==0)
234 *
235 * - Add a PPN or range of PPNs to the GMR's virtual address space.
236 * (ppn != 0, numPages != 0)
237 *
238 * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
239 * support multi-page GMR descriptor tables without forcing the
240 * driver to allocate physically contiguous memory.
241 * (ppn != 0, numPages == 0)
242 *
243 * Note that each physical page of SVGAGuestMemDescriptor structures
244 * can describe at least 2MB of guest memory. If the driver needs to
245 * use more than one page of descriptor structures, it must use one of
246 * its SVGAGuestMemDescriptors to point to an additional page. The
247 * device will never automatically cross a page boundary.
248 *
249 * Once the driver has described a GMR, it is immediately available
250 * for use via any FIFO command that uses an SVGAGuestPtr structure.
251 * These pointers include a GMR identifier plus an offset into that
252 * GMR.
253 *
254 * The driver must check the SVGA_CAP_GMR bit before using the GMR
255 * registers.
256 */
257
258/*
259 * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
260 * memory as well. In the future, these IDs could even be used to
261 * allow legacy memory regions to be redefined by the guest as GMRs.
262 *
263 * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
264 * is being phased out. Please try to use user-defined GMRs whenever
265 * possible.
266 */
267#define SVGA_GMR_NULL ((uint32) -1)
268#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
269
270typedef
271struct SVGAGuestMemDescriptor {
272 uint32 ppn;
273 uint32 numPages;
274} SVGAGuestMemDescriptor;
275
276typedef
277struct SVGAGuestPtr {
278 uint32 gmrId;
279 uint32 offset;
280} SVGAGuestPtr;
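/*
 * A sketch of defining a simple one-run GMR from the pieces above,
 * assuming a hypothetical alloc_descriptor_page() that returns a
 * mapped page plus its physical page number, and the reg_write()
 * accessor sketched earlier:
 */
extern SVGAGuestMemDescriptor *alloc_descriptor_page(uint32 *ppn);

void
define_gmr(uint32 gmrId, uint32 firstPpn, uint32 numPages)
{
   uint32 descPpn;
   SVGAGuestMemDescriptor *desc = alloc_descriptor_page(&descPpn);

   desc[0].ppn = firstPpn;             /* one contiguous run of pages */
   desc[0].numPages = numPages;
   desc[1].ppn = 0;                    /* list terminator */
   desc[1].numPages = 0;

   reg_write(SVGA_REG_GMR_ID, gmrId);
   reg_write(SVGA_REG_GMR_DESCRIPTOR, descPpn);
   /* The device reads the descriptor synchronously on the second
    * write, so the descriptor page may be freed immediately. */
}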
281
282
283/*
284 * SVGAGMRImageFormat --
285 *
286 * This is a packed representation of the source 2D image format
287 * for a GMR-to-screen blit. Currently it is defined as an encoding
288 * of the screen's color depth and bits-per-pixel; however, 16 bits
289 * are reserved for future use to identify other encodings (such as
290 * RGBA or higher-precision images).
291 *
292 * Currently supported formats:
293 *
294 * bpp depth Format Name
295 * --- ----- -----------
296 * 32 24 32-bit BGRX
297 * 24 24 24-bit BGR
298 * 16 16 RGB 5-6-5
299 * 16 15 RGB 5-5-5
300 *
301 */
302
303typedef
304struct SVGAGMRImageFormat {
305 union {
306 struct {
307 uint32 bitsPerPixel : 8;
308 uint32 colorDepth : 8;
309 uint32 reserved : 16; // Must be zero
310 };
311
312 uint32 value;
313 };
314} SVGAGMRImageFormat;
315
316/*
317 * SVGAColorBGRX --
318 *
319 * A 24-bit color format (BGRX), which does not depend on the
320 * format of the legacy guest framebuffer (GFB) or the current
321 * GMRFB state.
322 */
323
324typedef
325struct SVGAColorBGRX {
326 union {
327 struct {
328 uint32 b : 8;
329 uint32 g : 8;
330 uint32 r : 8;
331 uint32 x : 8; // Unused
332 };
333
334 uint32 value;
335 };
336} SVGAColorBGRX;
337
338
339/*
340 * SVGASignedRect --
341 * SVGASignedPoint --
342 *
343 * Signed rectangle and point primitives. These are used by the new
344 * 2D primitives for drawing to Screen Objects, which can occupy a
345 * signed virtual coordinate space.
346 *
347 * SVGASignedRect specifies a half-open interval: the (left, top)
348 * pixel is part of the rectangle, but the (right, bottom) pixel is
349 * not.
350 */
351
352typedef
353struct SVGASignedRect {
354 int32 left;
355 int32 top;
356 int32 right;
357 int32 bottom;
358} SVGASignedRect;
359
360typedef
361struct SVGASignedPoint {
362 int32 x;
363 int32 y;
364} SVGASignedPoint;
365
366
367/*
368 * Capabilities
369 *
370 * Note the holes in the bitfield. Missing bits have been deprecated,
371 * and must not be reused. Those capabilities will never be reported
372 * by new versions of the SVGA device.
373 */
374
375#define SVGA_CAP_NONE 0x00000000
376#define SVGA_CAP_RECT_COPY 0x00000002
377#define SVGA_CAP_CURSOR 0x00000020
378#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
379#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
380#define SVGA_CAP_8BIT_EMULATION 0x00000100
381#define SVGA_CAP_ALPHA_CURSOR 0x00000200
382#define SVGA_CAP_3D 0x00004000
383#define SVGA_CAP_EXTENDED_FIFO 0x00008000
384#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
385#define SVGA_CAP_PITCHLOCK 0x00020000
386#define SVGA_CAP_IRQMASK 0x00040000
387#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
388#define SVGA_CAP_GMR 0x00100000
389#define SVGA_CAP_TRACES 0x00200000
390
391
392/*
393 * FIFO register indices.
394 *
395 * The FIFO is a chunk of device memory mapped into guest physmem. It
396 * is always treated as 32-bit words.
397 *
398 * The guest driver gets to decide how to partition it between
399 * - FIFO registers (there are always at least 4, specifying where the
400 * following data area is and how much data it contains; there may be
401 * more registers following these, depending on the FIFO protocol
402 * version in use)
403 * - FIFO data, written by the guest and slurped out by the VMX.
404 * These indices are 32-bit word offsets into the FIFO.
405 */
406
407enum {
408 /*
409 * Block 1 (basic registers): The originally defined FIFO registers.
410 * These exist and are valid for all versions of the FIFO protocol.
411 */
412
413 SVGA_FIFO_MIN = 0,
414 SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
415 SVGA_FIFO_NEXT_CMD,
416 SVGA_FIFO_STOP,
417
418 /*
419 * Block 2 (extended registers): Mandatory registers for the extended
420 * FIFO. These exist if the SVGA caps register includes
421 * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
422 * associated capability bit is enabled.
423 *
424 * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
425 * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
426 * This means that the guest has to test individually (in most cases
427 * using FIFO caps) for the presence of registers after this; the VMX
428 * can define "extended FIFO" to mean whatever it wants, and currently
429 * won't enable it unless there's room for that set and much more.
430 */
431
432 SVGA_FIFO_CAPABILITIES = 4,
433 SVGA_FIFO_FLAGS,
434 // Valid with SVGA_FIFO_CAP_FENCE:
435 SVGA_FIFO_FENCE,
436
437 /*
438 * Block 3a (optional extended registers): Additional registers for the
439 * extended FIFO, whose presence isn't actually implied by
440 * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
441 * leave room for them.
442 *
443 * The VMX currently considers the registers in block 3a mandatory
444 * for the extended FIFO.
445 */
446
447 // Valid if exists (i.e. if extended FIFO enabled):
448 SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
449 // Valid with SVGA_FIFO_CAP_PITCHLOCK:
450 SVGA_FIFO_PITCHLOCK,
451
452 // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
453 SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
454 SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
455 SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
456 SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
457 SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
458
459 // Valid with SVGA_FIFO_CAP_RESERVE:
460 SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
461
462 /*
463 * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
464 *
465 * By default this is SVGA_ID_INVALID, to indicate that the cursor
466 * coordinates are specified relative to the virtual root. If this
467 * is set to a specific screen ID, cursor position is reinterpreted
468 * as a signed offset relative to that screen's origin. This is the
469 * only way to place the cursor on a non-rooted screen.
470 */
471 SVGA_FIFO_CURSOR_SCREEN_ID,
472
473 /*
474 * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
475 * registers, but this must be done carefully and with judicious use of
476 * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
477 * enough to tell you whether the register exists: we've shipped drivers
478 * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
479 * the earlier ones. The actual order of introduction was:
480 * - PITCHLOCK
481 * - 3D_CAPS
482 * - CURSOR_* (cursor bypass 3)
483 * - RESERVED
484 * So, code that wants to know whether it can use any of the
485 * aforementioned registers, or anything else added after PITCHLOCK and
486 * before 3D_CAPS, needs to reason about something other than
487 * SVGA_FIFO_MIN.
488 */
489
490 /*
491 * 3D caps block space; valid with 3D hardware version >=
492 * SVGA3D_HWVERSION_WS6_B1.
493 */
494 SVGA_FIFO_3D_CAPS = 32,
495 SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
496
497 /*
498 * End of VMX's current definition of "extended-FIFO registers".
499 * Registers before here are always enabled/disabled as a block; either
500 * the extended FIFO is enabled and includes all preceding registers, or
501 * it's disabled entirely.
502 *
503 * Block 3b (truly optional extended registers): Additional registers for
504 * the extended FIFO, which the VMX already knows how to enable and
505 * disable with correct granularity.
506 *
507 * Registers after here exist if and only if the guest SVGA driver
508 * sets SVGA_FIFO_MIN high enough to leave room for them.
509 */
510
511 // Valid if register exists:
512 SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
513 SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
514 SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
515
516 /*
517 * Always keep this last. This defines the maximum number of
518 * registers we know about. At power-on, this value is placed in
519 * the SVGA_REG_MEM_REGS register, and we expect the guest driver
520 * to allocate this much space in FIFO memory for registers.
521 */
522 SVGA_FIFO_NUM_REGS
523};
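/*
 * A sketch of testing for an individual FIFO register slot or
 * capability bit, assuming 'fifo' is the mapped FIFO memory. Note
 * that SVGA_FIFO_MIN holds a byte offset, while the register indices
 * above are 32-bit word offsets:
 */
volatile uint32 *fifo;                 /* mapped FIFO memory (assumption) */

Bool
fifo_has_reg(uint32 reg)
{
   return fifo[SVGA_FIFO_MIN] > (reg << 2);
}

Bool
fifo_has_cap(uint32 cap)
{
   return fifo_has_reg(SVGA_FIFO_CAPABILITIES) &&
          (fifo[SVGA_FIFO_CAPABILITIES] & cap) != 0;
}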
524
525
526/*
527 * Definition of registers included in extended FIFO support.
528 *
529 * The guest SVGA driver gets to allocate the FIFO between registers
530 * and data. It must always allocate at least 4 registers, but old
531 * drivers stopped there.
532 *
533 * The VMX will enable extended FIFO support if and only if the guest
534 * left enough room for all registers defined as part of the mandatory
535 * set for the extended FIFO.
536 *
537 * Note that the guest drivers typically allocate the FIFO only at
538 * initialization time, not at mode switches, so it's likely that the
539 * number of FIFO registers won't change without a reboot.
540 *
541 * All registers less than this value are guaranteed to be present if
542 * svgaUser->fifo.extended is set. Any later registers must be tested
543 * individually for compatibility at each use (in the VMX).
544 *
545 * This value is used only by the VMX, so it can change without
546 * affecting driver compatibility; keep it that way?
547 */
548#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
549
550
551/*
552 * FIFO Synchronization Registers
553 *
554 * This explains the relationship between the various FIFO
555 * sync-related registers in IOSpace and in FIFO space.
556 *
557 * SVGA_REG_SYNC --
558 *
559 * The SYNC register can be used in two different ways by the guest:
560 *
561 * 1. If the guest wishes to fully sync (drain) the FIFO,
562 * it will write once to SYNC then poll on the BUSY
563 * register. The FIFO is sync'ed once BUSY is zero.
564 *
565 * 2. If the guest wants to asynchronously wake up the host,
566 * it will write once to SYNC without polling on BUSY.
567 * Ideally it will do this after some new commands have
568 * been placed in the FIFO, and after reading a zero
569 * from SVGA_FIFO_BUSY.
570 *
571 * (1) is the original behaviour that SYNC was designed to
572 * support. Originally, a write to SYNC would implicitly
573 * trigger a read from BUSY. This causes us to synchronously
574 * process the FIFO.
575 *
576 * This behaviour has since been changed so that writing SYNC
577 * will *not* implicitly cause a read from BUSY. Instead, it
578 * makes a channel call which asynchronously wakes up the MKS
579 * thread.
580 *
581 * New guests can use this new behaviour to implement (2)
582 * efficiently. This lets guests get the host's attention
583 * without waiting for the MKS to poll, which gives us much
584 * better CPU utilization on SMP hosts and on UP hosts while
585 * we're blocked on the host GPU.
586 *
587 * Old guests shouldn't notice the behaviour change. SYNC was
588 * never guaranteed to process the entire FIFO, since it was
589 * bounded to a particular number of CPU cycles. Old guests will
590 * still loop on the BUSY register until the FIFO is empty.
591 *
592 * Writing to SYNC currently has the following side-effects:
593 *
594 * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
595 * - Asynchronously wakes up the MKS thread for FIFO processing
596 * - The value written to SYNC is recorded as a "reason", for
597 * stats purposes.
598 *
599 * If SVGA_FIFO_BUSY is available, drivers are advised to only
600 * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
601 * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
602 * eventually set SVGA_FIFO_BUSY on its own, but this approach
603 * lets the driver avoid sending multiple asynchronous wakeup
604 * messages to the MKS thread.
605 *
606 * SVGA_REG_BUSY --
607 *
608 * This register is set to TRUE when SVGA_REG_SYNC is written,
609 * and it reads as FALSE when the FIFO has been completely
610 * drained.
611 *
612 * Every read from this register causes us to synchronously
613 * process FIFO commands. There is no guarantee as to how many
614 * commands each read will process.
615 *
616 * CPU time spent processing FIFO commands will be billed to
617 * the guest.
618 *
619 * New drivers should avoid using this register unless they
620 * need to guarantee that the FIFO is completely drained. It
621 * is overkill for performing a sync-to-fence. Older drivers
622 * will use this register for any type of synchronization.
623 *
624 * SVGA_FIFO_BUSY --
625 *
626 * This register is a fast way for the guest driver to check
627 * whether the FIFO is already being processed. It reads and
628 * writes at normal RAM speeds, with no monitor intervention.
629 *
630 * If this register reads as TRUE, the host is guaranteeing that
631 * any new commands written into the FIFO will be noticed before
632 * the MKS goes back to sleep.
633 *
634 * If this register reads as FALSE, no such guarantee can be
635 * made.
636 *
637 * The guest should use this register to quickly determine
638 * whether or not it needs to wake up the host. If the guest
639 * just wrote a command or group of commands that it would like
640 * the host to begin processing, it should:
641 *
642 * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
643 * action is necessary.
644 *
645 * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
646 * code that we've already sent a SYNC to the host and we
647 * don't need to send a duplicate.
648 *
649 * 3. Write a reason to SVGA_REG_SYNC. This will send an
650 * asynchronous wakeup to the MKS thread.
651 */
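/*
 * The recommended wakeup sequence above, as a sketch (fifo_has_reg()
 * and reg_write() as in the earlier sketches):
 */
void
fifo_kick(void)
{
   if (fifo_has_reg(SVGA_FIFO_BUSY)) {
      if (fifo[SVGA_FIFO_BUSY])
         return;                       /* host is already awake */
      fifo[SVGA_FIFO_BUSY] = 1;        /* note that a SYNC is in flight */
   }
   reg_write(SVGA_REG_SYNC, 1);        /* value is only a stats "reason" */
}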
652
653
654/*
655 * FIFO Capabilities
656 *
657 * Fence -- Fence register and command are supported
658 * Accel Front -- Front buffer only commands are supported
659 * Pitch Lock -- Pitch lock register is supported
660 * Video -- SVGA Video overlay units are supported
661 * Escape -- Escape command is supported
662 *
663 * XXX: Add longer descriptions for each capability, including a list
664 * of the new features that each capability provides.
665 *
666 * SVGA_FIFO_CAP_SCREEN_OBJECT --
667 *
668 * Provides dynamic multi-screen rendering, for improved Unity and
669 * multi-monitor modes. With Screen Object, the guest can
670 * dynamically create and destroy 'screens', which can represent
671 * Unity windows or virtual monitors. Screen Object also provides
672 * strong guarantees that DMA operations happen only when
673 * guest-initiated. Screen Object deprecates the BAR1 guest
674 * framebuffer (GFB) and all commands that work only with the GFB.
675 *
676 * New registers:
677 * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
678 *
679 * New 2D commands:
680 * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
681 * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
682 *
683 * New 3D commands:
684 * BLIT_SURFACE_TO_SCREEN
685 *
686 * New guarantees:
687 *
688 * - The host will not read or write guest memory, including the GFB,
689 * except when explicitly initiated by a DMA command.
690 *
691 * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
692 * is guaranteed to complete before any subsequent FENCEs.
693 *
694 * - All legacy commands which affect a Screen (UPDATE, PRESENT,
695 * PRESENT_READBACK) as well as new Screen blit commands will
696 * all behave consistently as blits, and memory will be read
697 * or written in FIFO order.
698 *
699 * For example, if you PRESENT from one SVGA3D surface to multiple
700 * places on the screen, the data copied will always be from the
701 * SVGA3D surface at the time the PRESENT was issued in the FIFO.
702 * This was not necessarily true on devices without Screen Object.
703 *
704 * This means that on devices that support Screen Object, the
705 * PRESENT_READBACK command should not be necessary unless you
706 * actually want to read back the results of 3D rendering into
707 * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
708 * command provides a strict superset of functionality.)
709 *
710 * - When a screen is resized, either using Screen Object commands or
711 * legacy multimon registers, its contents are preserved.
712 */
713
714#define SVGA_FIFO_CAP_NONE 0
715#define SVGA_FIFO_CAP_FENCE (1<<0)
716#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
717#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
718#define SVGA_FIFO_CAP_VIDEO (1<<3)
719#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
720#define SVGA_FIFO_CAP_ESCAPE (1<<5)
721#define SVGA_FIFO_CAP_RESERVE (1<<6)
722#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
723
724
725/*
726 * FIFO Flags
727 *
728 * Accel Front -- Driver should use front buffer only commands
729 */
730
731#define SVGA_FIFO_FLAG_NONE 0
732#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
733#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
734
735/*
736 * FIFO reservation sentinel value
737 */
738
739#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
740
741
742/*
743 * Video overlay support
744 */
745
746#define SVGA_NUM_OVERLAY_UNITS 32
747
748
749/*
750 * Video capabilities that the guest is currently using
751 */
752
753#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
754
755
756/*
757 * Offsets for the video overlay registers
758 */
759
760enum {
761 SVGA_VIDEO_ENABLED = 0,
762 SVGA_VIDEO_FLAGS,
763 SVGA_VIDEO_DATA_OFFSET,
764 SVGA_VIDEO_FORMAT,
765 SVGA_VIDEO_COLORKEY,
766 SVGA_VIDEO_SIZE, // Deprecated
767 SVGA_VIDEO_WIDTH,
768 SVGA_VIDEO_HEIGHT,
769 SVGA_VIDEO_SRC_X,
770 SVGA_VIDEO_SRC_Y,
771 SVGA_VIDEO_SRC_WIDTH,
772 SVGA_VIDEO_SRC_HEIGHT,
773 SVGA_VIDEO_DST_X, // Signed int32
774 SVGA_VIDEO_DST_Y, // Signed int32
775 SVGA_VIDEO_DST_WIDTH,
776 SVGA_VIDEO_DST_HEIGHT,
777 SVGA_VIDEO_PITCH_1,
778 SVGA_VIDEO_PITCH_2,
779 SVGA_VIDEO_PITCH_3,
780 SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
781 SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
782 SVGA_VIDEO_NUM_REGS
783};
784
785
786/*
787 * SVGA Overlay Units
788 *
789 * width and height relate to the entire source video frame.
790 * srcX, srcY, srcWidth and srcHeight represent subset of the source
791 * video frame to be displayed.
792 */
793
794typedef struct SVGAOverlayUnit {
795 uint32 enabled;
796 uint32 flags;
797 uint32 dataOffset;
798 uint32 format;
799 uint32 colorKey;
800 uint32 size;
801 uint32 width;
802 uint32 height;
803 uint32 srcX;
804 uint32 srcY;
805 uint32 srcWidth;
806 uint32 srcHeight;
807 int32 dstX;
808 int32 dstY;
809 uint32 dstWidth;
810 uint32 dstHeight;
811 uint32 pitches[3];
812 uint32 dataGMRId;
813 uint32 dstScreenId;
814} SVGAOverlayUnit;
815
816
817/*
818 * SVGAScreenObject --
819 *
820 * This is a new way to represent a guest's multi-monitor screen or
821 * Unity window. Screen objects are only supported if the
822 * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
823 *
824 * If Screen Objects are supported, they can be used to fully
825 * replace the functionality provided by the framebuffer registers
826 * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
827 *
828 * The screen object is a struct with guaranteed binary
829 * compatibility. New flags can be added, and the struct may grow,
830 * but existing fields must retain their meaning.
831 *
832 */
833
834#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
835#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
836#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
837
838typedef
839struct SVGAScreenObject {
840 uint32 structSize; // sizeof(SVGAScreenObject)
841 uint32 id;
842 uint32 flags;
843 struct {
844 uint32 width;
845 uint32 height;
846 } size;
847 struct {
848 int32 x;
849 int32 y;
850 } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
851} SVGAScreenObject;
852
853
854/*
855 * Commands in the command FIFO:
856 *
857 * Command IDs defined below are used for the traditional 2D FIFO
858 * communication (not all commands are available for all versions of the
859 * SVGA FIFO protocol).
860 *
861 * Note the holes in the command ID numbers: These commands have been
862 * deprecated, and the old IDs must not be reused.
863 *
864 * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
865 * protocol.
866 *
867 * Each command's parameters are described by the comments and
868 * structs below.
869 */
870
871typedef enum {
872 SVGA_CMD_INVALID_CMD = 0,
873 SVGA_CMD_UPDATE = 1,
874 SVGA_CMD_RECT_COPY = 3,
875 SVGA_CMD_DEFINE_CURSOR = 19,
876 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
877 SVGA_CMD_UPDATE_VERBOSE = 25,
878 SVGA_CMD_FRONT_ROP_FILL = 29,
879 SVGA_CMD_FENCE = 30,
880 SVGA_CMD_ESCAPE = 33,
881 SVGA_CMD_DEFINE_SCREEN = 34,
882 SVGA_CMD_DESTROY_SCREEN = 35,
883 SVGA_CMD_DEFINE_GMRFB = 36,
884 SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
885 SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
886 SVGA_CMD_ANNOTATION_FILL = 39,
887 SVGA_CMD_ANNOTATION_COPY = 40,
888 SVGA_CMD_MAX
889} SVGAFifoCmdId;
890
891#define SVGA_CMD_MAX_ARGS 64
892
893
894/*
895 * SVGA_CMD_UPDATE --
896 *
897 * This is a DMA transfer which copies from the Guest Framebuffer
898 * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
899 * intersect with the provided virtual rectangle.
900 *
901 * This command does not support using arbitrary guest memory as a
902 * data source; it only works with the pre-defined GFB memory.
903 * This command also does not support signed virtual coordinates.
904 * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
905 * negative root x/y coordinates, the negative portion of those
906 * screens will not be reachable by this command.
907 *
908 * This command is not necessary when using framebuffer
909 * traces. Traces are automatically enabled if the SVGA FIFO is
910 * disabled, and you may explicitly enable/disable traces using
911 * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
912 * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
913 *
914 * Traces and SVGA_CMD_UPDATE are the only supported ways to render
915 * pseudocolor screen updates. The newer Screen Object commands
916 * only support true color formats.
917 *
918 * Availability:
919 * Always available.
920 */
921
922typedef
923struct {
924 uint32 x;
925 uint32 y;
926 uint32 width;
927 uint32 height;
928} SVGAFifoCmdUpdate;
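/*
 * A sketch of emitting the command: one command ID word followed by
 * the body, via a hypothetical fifo_write_words() helper that appends
 * 32-bit words at SVGA_FIFO_NEXT_CMD:
 */
extern void fifo_write_words(const uint32 *words, uint32 count);

void
gfb_update(uint32 x, uint32 y, uint32 width, uint32 height)
{
   uint32 cmd[5] = { SVGA_CMD_UPDATE, x, y, width, height };

   fifo_write_words(cmd, 5);
}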
929
930
931/*
932 * SVGA_CMD_RECT_COPY --
933 *
934 * Perform a rectangular DMA transfer from one area of the GFB to
935 * another, and copy the result to any screens which intersect it.
936 *
937 * Availability:
938 * SVGA_CAP_RECT_COPY
939 */
940
941typedef
942struct {
943 uint32 srcX;
944 uint32 srcY;
945 uint32 destX;
946 uint32 destY;
947 uint32 width;
948 uint32 height;
949} SVGAFifoCmdRectCopy;
950
951
952/*
953 * SVGA_CMD_DEFINE_CURSOR --
954 *
955 * Provide a new cursor image, as an AND/XOR mask.
956 *
957 * The recommended way to position the cursor overlay is by using
958 * the SVGA_FIFO_CURSOR_* registers, supported by the
959 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
960 *
961 * Availability:
962 * SVGA_CAP_CURSOR
963 */
964
965typedef
966struct {
967 uint32 id; // Reserved, must be zero.
968 uint32 hotspotX;
969 uint32 hotspotY;
970 uint32 width;
971 uint32 height;
972 uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
973 uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
974 /*
975 * Followed by scanline data for AND mask, then XOR mask.
976 * Each scanline is padded to a 32-bit boundary.
977 */
978} SVGAFifoCmdDefineCursor;
979
980
981/*
982 * SVGA_CMD_DEFINE_ALPHA_CURSOR --
983 *
984 * Provide a new cursor image, in 32-bit BGRA format.
985 *
986 * The recommended way to position the cursor overlay is by using
987 * the SVGA_FIFO_CURSOR_* registers, supported by the
988 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
989 *
990 * Availability:
991 * SVGA_CAP_ALPHA_CURSOR
992 */
993
994typedef
995struct {
996 uint32 id; // Reserved, must be zero.
997 uint32 hotspotX;
998 uint32 hotspotY;
999 uint32 width;
1000 uint32 height;
1001 /* Followed by scanline data */
1002} SVGAFifoCmdDefineAlphaCursor;
1003
1004
1005/*
1006 * SVGA_CMD_UPDATE_VERBOSE --
1007 *
1008 * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
1009 * 'reason' value, an opaque cookie which is used by internal
1010 * debugging tools. Third party drivers should not use this
1011 * command.
1012 *
1013 * Availability:
1014 * SVGA_CAP_EXTENDED_FIFO
1015 */
1016
1017typedef
1018struct {
1019 uint32 x;
1020 uint32 y;
1021 uint32 width;
1022 uint32 height;
1023 uint32 reason;
1024} SVGAFifoCmdUpdateVerbose;
1025
1026
1027/*
1028 * SVGA_CMD_FRONT_ROP_FILL --
1029 *
1030 * This is a hint which tells the SVGA device that the driver has
1031 * just filled a rectangular region of the GFB with a solid
1032 * color. Instead of reading these pixels from the GFB, the device
1033 * can assume that they all equal 'color'. This is primarily used
1034 * for remote desktop protocols.
1035 *
1036 * Availability:
1037 * SVGA_FIFO_CAP_ACCELFRONT
1038 */
1039
1040#define SVGA_ROP_COPY 0x03
1041
1042typedef
1043struct {
1044 uint32 color; // In the same format as the GFB
1045 uint32 x;
1046 uint32 y;
1047 uint32 width;
1048 uint32 height;
1049 uint32 rop; // Must be SVGA_ROP_COPY
1050} SVGAFifoCmdFrontRopFill;
1051
1052
1053/*
1054 * SVGA_CMD_FENCE --
1055 *
1056 * Insert a synchronization fence. When the SVGA device reaches
1057 * this command, it will copy the 'fence' value into the
1058 * SVGA_FIFO_FENCE register. It will also compare the fence against
1059 * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
1060 * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
1061 * raise this interrupt.
1062 *
1063 * Availability:
1064 * SVGA_FIFO_FENCE for this command,
1065 * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
1066 */
1067
1068typedef
1069struct {
1070 uint32 fence;
1071} SVGAFifoCmdFence;
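/*
 * A sketch of inserting a fence and doing a full sync-to-fence with
 * the legacy SYNC/BUSY mechanism; fifo_write_words(), 'fifo',
 * reg_read() and reg_write() are from the earlier sketches:
 */
uint32
fence_insert(void)
{
   static uint32 next = 1;
   uint32 cmd[2] = { SVGA_CMD_FENCE, next };

   fifo_write_words(cmd, 2);
   return next++;
}

void
fence_sync(uint32 fence)
{
   /* The signed difference tolerates fence counter wrap-around. */
   while ((int32)(fifo[SVGA_FIFO_FENCE] - fence) < 0) {
      reg_write(SVGA_REG_SYNC, 1);
      while (reg_read(SVGA_REG_BUSY))
         ;                             /* host is draining the FIFO */
   }
}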
1072
1073
1074/*
1075 * SVGA_CMD_ESCAPE --
1076 *
1077 * Send an extended or vendor-specific variable length command.
1078 * This is used for video overlay, third party plugins, and
1079 * internal debugging tools. See svga_escape.h
1080 *
1081 * Availability:
1082 * SVGA_FIFO_CAP_ESCAPE
1083 */
1084
1085typedef
1086struct {
1087 uint32 nsid;
1088 uint32 size;
1089 /* followed by 'size' bytes of data */
1090} SVGAFifoCmdEscape;
1091
1092
1093/*
1094 * SVGA_CMD_DEFINE_SCREEN --
1095 *
1096 * Define or redefine an SVGAScreenObject. See the description of
1097 * SVGAScreenObject above. The video driver is responsible for
1098 * generating new screen IDs. They should be small positive
1099 * integers. The virtual device will have an implementation
1100 * specific upper limit on the number of screen IDs
1101 * supported. Drivers are responsible for recycling IDs. The first
1102 * valid ID is zero.
1103 *
1104 * - Interaction with other registers:
1105 *
1106 * For backwards compatibility, when the GFB mode registers (WIDTH,
1107 * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
1108 * deletes all screens other than screen #0, and redefines screen
1109 * #0 according to the specified mode. Drivers that use
1110 * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
1111 *
1112 * If you use screen objects, do not use the legacy multi-mon
1113 * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
1114 *
1115 * Availability:
1116 * SVGA_FIFO_CAP_SCREEN_OBJECT
1117 */
1118
1119typedef
1120struct {
1121 SVGAScreenObject screen; // Variable-length according to version
1122} SVGAFifoCmdDefineScreen;
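/*
 * A sketch of defining a 1024x768 primary screen at the virtual
 * origin, via a hypothetical fifo_write_cmd(id, body, size) helper
 * that emits the command ID word followed by the body:
 */
extern void fifo_write_cmd(uint32 cmdId, const void *body, uint32 size);

void
define_screen(uint32 id)
{
   SVGAFifoCmdDefineScreen cmd;

   cmd.screen.structSize = sizeof(SVGAScreenObject);
   cmd.screen.id = id;
   cmd.screen.flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY;
   cmd.screen.size.width = 1024;
   cmd.screen.size.height = 768;
   cmd.screen.root.x = 0;
   cmd.screen.root.y = 0;

   fifo_write_cmd(SVGA_CMD_DEFINE_SCREEN, &cmd, sizeof cmd);
}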
1123
1124
1125/*
1126 * SVGA_CMD_DESTROY_SCREEN --
1127 *
1128 * Destroy an SVGAScreenObject. Its ID is immediately available for
1129 * re-use.
1130 *
1131 * Availability:
1132 * SVGA_FIFO_CAP_SCREEN_OBJECT
1133 */
1134
1135typedef
1136struct {
1137 uint32 screenId;
1138} SVGAFifoCmdDestroyScreen;
1139
1140
1141/*
1142 * SVGA_CMD_DEFINE_GMRFB --
1143 *
1144 * This command sets a piece of SVGA device state called the
1145 * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
1146 * piece of light-weight state which identifies the location and
1147 * format of an image in guest memory or in BAR1. The GMRFB has
1148 * an arbitrary size, and it doesn't need to match the geometry
1149 * of the GFB or any screen object.
1150 *
1151 * The GMRFB can be redefined as often as you like. You could
1152 * always use the same GMRFB, you could redefine it before
1153 * rendering from a different guest screen, or you could even
1154 * redefine it before every blit.
1155 *
1156 * There are multiple ways to use this command. The simplest way is
1157 * to use it to move the framebuffer either to elsewhere in the GFB
1158 * (BAR1) memory region, or to a user-defined GMR. This lets a
1159 * driver use a framebuffer allocated entirely out of normal system
1160 * memory, which we encourage.
1161 *
1162 * Another way to use this command is to set up a ring buffer of
1163 * updates in GFB memory. If a driver wants to ensure that no
1164 * frames are skipped by the SVGA device, it is important that the
1165 * driver not modify the source data for a blit until the device is
1166 * done processing the command. One efficient way to accomplish
1167 * this is to use a ring of small DMA buffers. Each buffer is used
1168 * for one blit, then we move on to the next buffer in the
1169 * ring. The FENCE mechanism is used to protect each buffer from
1170 * re-use until the device is finished with that buffer's
1171 * corresponding blit.
1172 *
1173 * This command does not affect the meaning of SVGA_CMD_UPDATE.
1174 * UPDATEs always occur from the legacy GFB memory area. This
1175 * command has no support for pseudocolor GMRFBs. Currently only
1176 * true-color 15, 16, and 24-bit depths are supported. Future
1177 * devices may expose capabilities for additional framebuffer
1178 * formats.
1179 *
1180 * The default GMRFB value is undefined. Drivers must always send
1181 * this command at least once before performing any blit from the
1182 * GMRFB.
1183 *
1184 * Availability:
1185 * SVGA_FIFO_CAP_SCREEN_OBJECT
1186 */
1187
1188typedef
1189struct {
1190 SVGAGuestPtr ptr;
1191 uint32 bytesPerLine;
1192 SVGAGMRImageFormat format;
1193} SVGAFifoCmdDefineGMRFB;
1194
1195
1196/*
1197 * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
1198 *
1199 * This is a guest-to-host blit. It performs a DMA operation to
1200 * copy a rectangular region of pixels from the current GMRFB to
1201 * one or more Screen Objects.
1202 *
1203 * The destination coordinate may be specified relative to a
1204 * screen's origin (if a screen ID is specified) or relative to the
1205 * virtual coordinate system's origin (if the screen ID is
1206 * SVGA_ID_INVALID). The actual destination may span zero or more
1207 * screens, in the case of a virtual destination rect or a rect
1208 * which extends off the edge of the specified screen.
1209 *
1210 * This command writes to the screen's "base layer": the underlying
1211 * framebuffer which exists below any cursor or video overlays. No
1212 * action is necessary to explicitly hide or update any overlays
1213 * which exist on top of the updated region.
1214 *
1215 * The SVGA device is guaranteed to finish reading from the GMRFB
1216 * by the time any subsequent FENCE commands are reached.
1217 *
1218 * This command consumes an annotation. See the
1219 * SVGA_CMD_ANNOTATION_* commands for details.
1220 *
1221 * Availability:
1222 * SVGA_FIFO_CAP_SCREEN_OBJECT
1223 */
1224
1225typedef
1226struct {
1227 SVGASignedPoint srcOrigin;
1228 SVGASignedRect destRect;
1229 uint32 destScreenId;
1230} SVGAFifoCmdBlitGMRFBToScreen;
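/*
 * A typical present path pairs DEFINE_GMRFB with this blit: point the
 * GMRFB at a 32bpp image in a user-defined GMR, then copy a dirty
 * rectangle to screen coordinates. A sketch, reusing the hypothetical
 * fifo_write_cmd() helper:
 */
void
present_rect(uint32 gmrId, uint32 pitch,
             int32 x, int32 y, int32 w, int32 h)
{
   SVGAFifoCmdDefineGMRFB def;
   SVGAFifoCmdBlitGMRFBToScreen blit;

   def.ptr.gmrId = gmrId;
   def.ptr.offset = 0;
   def.bytesPerLine = pitch;
   def.format.bitsPerPixel = 32;
   def.format.colorDepth = 24;
   def.format.reserved = 0;

   blit.srcOrigin.x = x;
   blit.srcOrigin.y = y;
   blit.destRect.left = x;
   blit.destRect.top = y;
   blit.destRect.right = x + w;
   blit.destRect.bottom = y + h;
   blit.destScreenId = 0;              /* relative to screen #0's origin */

   fifo_write_cmd(SVGA_CMD_DEFINE_GMRFB, &def, sizeof def);
   fifo_write_cmd(SVGA_CMD_BLIT_GMRFB_TO_SCREEN, &blit, sizeof blit);
}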
1231
1232
1233/*
1234 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
1235 *
1236 * This is a host-to-guest blit. It performs a DMA operation to
1237 * copy a rectangular region of pixels from a single Screen Object
1238 * back to the current GMRFB.
1239 *
1240 * Usage note: This command should be used rarely. It will
1241 * typically be inefficient, but it is necessary for some types of
1242 * synchronization between 3D (GPU) and 2D (CPU) rendering into
1243 * overlapping areas of a screen.
1244 *
1245 * The source coordinate is specified relative to a screen's
1246 * origin. The provided screen ID must be valid. If any parameters
1247 * are invalid, the resulting pixel values are undefined.
1248 *
1249 * This command reads the screen's "base layer". Overlays like
1250 * video and cursor are not included, but any data which was sent
1251 * using a blit-to-screen primitive will be available, no matter
1252 * whether the data's original source was the GMRFB or the 3D
1253 * acceleration hardware.
1254 *
1255 * Note that our guest-to-host blits and host-to-guest blits aren't
1256 * symmetric in their current implementation. While the parameters
1257 * are identical, host-to-guest blits are a lot less featureful.
1258 * They do not support clipping: If the source parameters don't
1259 * fully fit within a screen, the blit fails. They must originate
1260 * from exactly one screen. Virtual coordinates are not directly
1261 * supported.
1262 *
1263 * Host-to-guest blits do support the same set of GMRFB formats
1264 * offered by guest-to-host blits.
1265 *
1266 * The SVGA device is guaranteed to finish writing to the GMRFB by
1267 * the time any subsequent FENCE commands are reached.
1268 *
1269 * Availability:
1270 * SVGA_FIFO_CAP_SCREEN_OBJECT
1271 */
1272
1273typedef
1274struct {
1275 SVGASignedPoint destOrigin;
1276 SVGASignedRect srcRect;
1277 uint32 srcScreenId;
1278} SVGAFifoCmdBlitScreenToGMRFB;
1279
1280
1281/*
1282 * SVGA_CMD_ANNOTATION_FILL --
1283 *
1284 * This is a blit annotation. This command stores a small piece of
1285 * device state which is consumed by the next blit-to-screen
1286 * command. The state is only cleared by commands which are
1287 * specifically documented as consuming an annotation. Other
1288 * commands (such as ESCAPEs for debugging) may intervene between
1289 * the annotation and its associated blit.
1290 *
1291 * This annotation is a promise about the contents of the next
1292 * blit: The video driver is guaranteeing that all pixels in that
1293 * blit will have the same value, specified here as a color in
1294 * SVGAColorBGRX format.
1295 *
1296 * The SVGA device can still render the blit correctly even if it
1297 * ignores this annotation, but the annotation may allow it to
1298 * perform the blit more efficiently, for example by ignoring the
1299 * source data and performing a fill in hardware.
1300 *
1301 * This annotation is most important for performance when the
1302 * user's display is being remoted over a network connection.
1303 *
1304 * Availability:
1305 * SVGA_FIFO_CAP_SCREEN_OBJECT
1306 */
1307
1308typedef
1309struct {
1310 SVGAColorBGRX color;
1311} SVGAFifoCmdAnnotationFill;
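/*
 * A sketch of the annotation/blit pairing described above, reserving
 * both commands in one FIFO transaction so the pair stays adjacent.
 * The wrapper function and its parameters are hypothetical; the
 * helpers are the ones declared in vmwgfx_drv.h.
 */
static int example_annotated_fill(struct vmw_private *dev_priv,
				  SVGAColorBGRX color,
				  const SVGASignedPoint *src,
				  const SVGASignedRect *dst,
				  uint32 screen_id)
{
	struct {
		uint32 fill_cmd;
		SVGAFifoCmdAnnotationFill fill;
		uint32 blit_cmd;
		SVGAFifoCmdBlitGMRFBToScreen blit;
	} *cmds;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (unlikely(cmds == NULL))
		return -ENOMEM;

	/* Promise: every pixel of the following blit has this color. */
	cmds->fill_cmd = SVGA_CMD_ANNOTATION_FILL;
	cmds->fill.color = color;

	cmds->blit_cmd = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	cmds->blit.srcOrigin = *src;
	cmds->blit.destRect = *dst;
	cmds->blit.destScreenId = screen_id;
	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}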
1312
1313
1314/*
1315 * SVGA_CMD_ANNOTATION_COPY --
1316 *
1317 * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
1318 * information about annotations.
1319 *
1320 * This annotation is a promise about the contents of the next
1321 * blit: The video driver is guaranteeing that all pixels in that
1322 * blit will have the same value as those which already exist at an
1323 * identically-sized region on the same or a different screen.
1324 *
1325 * Note that the source pixels for the COPY in this annotation are
1326 * sampled before applying the annotation's associated blit. They
1327 * are allowed to overlap with the blit's destination pixels.
1328 *
1329 * The copy source rectangle is specified the same way as the blit
1330 * destination: it can be a rectangle which spans zero or more
1331 * screens, specified relative to either a screen or to the virtual
1332 * coordinate system's origin. If the source rectangle includes
1333 * pixels which are not from exactly one screen, the results are
1334 * undefined.
1335 *
1336 * Availability:
1337 * SVGA_FIFO_CAP_SCREEN_OBJECT
1338 */
1339
1340typedef
1341struct {
1342 SVGASignedPoint srcOrigin;
1343 uint32 srcScreenId;
1344} SVGAFifoCmdAnnotationCopy;
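/*
 * A sketch of the copy annotation, under the same assumptions as the
 * fill example above (hypothetical wrapper and parameters). The
 * annotated source pixels are sampled before the blit lands, so an
 * overlapping source and destination behave like a screen-to-screen
 * copy.
 */
static int example_annotated_copy(struct vmw_private *dev_priv,
				  const SVGASignedPoint *copy_src,
				  uint32 copy_screen_id,
				  const SVGASignedPoint *gmrfb_src,
				  const SVGASignedRect *dst,
				  uint32 dst_screen_id)
{
	struct {
		uint32 copy_cmd;
		SVGAFifoCmdAnnotationCopy copy;
		uint32 blit_cmd;
		SVGAFifoCmdBlitGMRFBToScreen blit;
	} *cmds;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (unlikely(cmds == NULL))
		return -ENOMEM;

	/* Promise: the next blit equals the pixels at copy_src. */
	cmds->copy_cmd = SVGA_CMD_ANNOTATION_COPY;
	cmds->copy.srcOrigin = *copy_src;
	cmds->copy.srcScreenId = copy_screen_id;

	cmds->blit_cmd = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	cmds->blit.srcOrigin = *gmrfb_src;
	cmds->blit.destRect = *dst;
	cmds->blit.destScreenId = dst_screen_id;
	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}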
1345
1346#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 000000000000..55836dedcfc2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * Silly typedefs for the svga headers. Currently the headers are shared
30 * between all components that talk to svga. And as such the headers
31 * are in a completely different style and use weird defines.
32 *
33 * This file lets all the ugly be prefixed with svga*.
34 */
35
36#ifndef _SVGA_TYPES_H_
37#define _SVGA_TYPES_H_
38
39typedef uint16_t uint16;
40typedef uint32_t uint32;
41typedef uint8_t uint8;
42typedef int32_t int32;
43typedef bool Bool;
44
45#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 000000000000..825ebe3d89d5
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,252 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31
32static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
33 TTM_PL_FLAG_CACHED;
34
35static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
36 TTM_PL_FLAG_CACHED |
37 TTM_PL_FLAG_NO_EVICT;
38
39static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
40 TTM_PL_FLAG_CACHED;
41
42struct ttm_placement vmw_vram_placement = {
43 .fpfn = 0,
44 .lpfn = 0,
45 .num_placement = 1,
46 .placement = &vram_placement_flags,
47 .num_busy_placement = 1,
48 .busy_placement = &vram_placement_flags
49};
50
51struct ttm_placement vmw_vram_sys_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &sys_placement_flags
58};
59
60struct ttm_placement vmw_vram_ne_placement = {
61 .fpfn = 0,
62 .lpfn = 0,
63 .num_placement = 1,
64 .placement = &vram_ne_placement_flags,
65 .num_busy_placement = 1,
66 .busy_placement = &vram_ne_placement_flags
67};
68
69struct ttm_placement vmw_sys_placement = {
70 .fpfn = 0,
71 .lpfn = 0,
72 .num_placement = 1,
73 .placement = &sys_placement_flags,
74 .num_busy_placement = 1,
75 .busy_placement = &sys_placement_flags
76};
77
78struct vmw_ttm_backend {
79 struct ttm_backend backend;
80};
81
82static int vmw_ttm_populate(struct ttm_backend *backend,
83 unsigned long num_pages, struct page **pages,
84 struct page *dummy_read_page)
85{
86 return 0;
87}
88
89static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
90{
91 return 0;
92}
93
94static int vmw_ttm_unbind(struct ttm_backend *backend)
95{
96 return 0;
97}
98
99static void vmw_ttm_clear(struct ttm_backend *backend)
100{
101}
102
103static void vmw_ttm_destroy(struct ttm_backend *backend)
104{
105 struct vmw_ttm_backend *vmw_be =
106 container_of(backend, struct vmw_ttm_backend, backend);
107
108 kfree(vmw_be);
109}
110
111static struct ttm_backend_func vmw_ttm_func = {
112 .populate = vmw_ttm_populate,
113 .clear = vmw_ttm_clear,
114 .bind = vmw_ttm_bind,
115 .unbind = vmw_ttm_unbind,
116 .destroy = vmw_ttm_destroy,
117};
118
119struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
120{
121 struct vmw_ttm_backend *vmw_be;
122
123 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
124 if (!vmw_be)
125 return NULL;
126
127 vmw_be->backend.func = &vmw_ttm_func;
128
129 return &vmw_be->backend;
130}
131
132int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
133{
134 return 0;
135}
136
137int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
138 struct ttm_mem_type_manager *man)
139{
140 struct vmw_private *dev_priv =
141 container_of(bdev, struct vmw_private, bdev);
142
143 switch (type) {
144 case TTM_PL_SYSTEM:
145 /* System memory */
146
147 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
148 man->available_caching = TTM_PL_MASK_CACHING;
149 man->default_caching = TTM_PL_FLAG_CACHED;
150 break;
151 case TTM_PL_VRAM:
152 /* "On-card" video ram */
153 man->gpu_offset = 0;
154 man->io_offset = dev_priv->vram_start;
155 man->io_size = dev_priv->vram_size;
156 man->flags = TTM_MEMTYPE_FLAG_FIXED |
157 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
158 man->io_addr = NULL;
159 man->available_caching = TTM_PL_MASK_CACHING;
160 man->default_caching = TTM_PL_FLAG_WC;
161 break;
162 default:
163 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
164 return -EINVAL;
165 }
166 return 0;
167}
168
169void vmw_evict_flags(struct ttm_buffer_object *bo,
170 struct ttm_placement *placement)
171{
172 *placement = vmw_sys_placement;
173}
174
175/**
176 * FIXME: Proper access checks on buffers.
177 */
178
179static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
180{
181 return 0;
182}
183
184static void vmw_move_notify(struct ttm_buffer_object *bo,
185 struct ttm_mem_reg *new_mem)
186{
187 if (new_mem->mem_type != TTM_PL_SYSTEM)
188 vmw_dmabuf_gmr_unbind(bo);
189}
190
191static void vmw_swap_notify(struct ttm_buffer_object *bo)
192{
193 vmw_dmabuf_gmr_unbind(bo);
194}
195
196/**
197 * FIXME: We're using the old vmware polling method to sync.
198 * Do this with fences instead.
199 */
200
201static void *vmw_sync_obj_ref(void *sync_obj)
202{
203 return sync_obj;
204}
205
206static void vmw_sync_obj_unref(void **sync_obj)
207{
208 *sync_obj = NULL;
209}
210
211static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
212{
213 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
214
215 mutex_lock(&dev_priv->hw_mutex);
216 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
217 mutex_unlock(&dev_priv->hw_mutex);
218 return 0;
219}
220
221static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
222{
223 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
224 uint32_t sequence = (unsigned long) sync_obj;
225
226 return vmw_fence_signaled(dev_priv, sequence);
227}
228
229static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
230 bool lazy, bool interruptible)
231{
232 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
233 uint32_t sequence = (unsigned long) sync_obj;
234
235 return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
236}
237
238struct ttm_bo_driver vmw_bo_driver = {
239 .create_ttm_backend_entry = vmw_ttm_backend_init,
240 .invalidate_caches = vmw_invalidate_caches,
241 .init_mem_type = vmw_init_mem_type,
242 .evict_flags = vmw_evict_flags,
243 .move = NULL,
244 .verify_access = vmw_verify_access,
245 .sync_obj_signaled = vmw_sync_obj_signaled,
246 .sync_obj_wait = vmw_sync_obj_wait,
247 .sync_obj_flush = vmw_sync_obj_flush,
248 .sync_obj_unref = vmw_sync_obj_unref,
249 .sync_obj_ref = vmw_sync_obj_ref,
250 .move_notify = vmw_move_notify,
251 .swap_notify = vmw_swap_notify
252};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 000000000000..0c9c0811f42d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,783 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30#include "ttm/ttm_placement.h"
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_object.h"
33#include "ttm/ttm_module.h"
34
35#define VMWGFX_DRIVER_NAME "vmwgfx"
36#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
37#define VMWGFX_CHIP_SVGAII 0
38#define VMW_FB_RESERVATION 0
39
40/**
41 * Fully encoded drm commands. Might move to vmw_drm.h
42 */
43
44#define DRM_IOCTL_VMW_GET_PARAM \
45 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
46 struct drm_vmw_getparam_arg)
47#define DRM_IOCTL_VMW_ALLOC_DMABUF \
48 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
49 union drm_vmw_alloc_dmabuf_arg)
50#define DRM_IOCTL_VMW_UNREF_DMABUF \
51 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
52 struct drm_vmw_unref_dmabuf_arg)
53#define DRM_IOCTL_VMW_CURSOR_BYPASS \
54 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
55 struct drm_vmw_cursor_bypass_arg)
56
57#define DRM_IOCTL_VMW_CONTROL_STREAM \
58 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
59 struct drm_vmw_control_stream_arg)
60#define DRM_IOCTL_VMW_CLAIM_STREAM \
61 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
62 struct drm_vmw_stream_arg)
63#define DRM_IOCTL_VMW_UNREF_STREAM \
64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
65 struct drm_vmw_stream_arg)
66
67#define DRM_IOCTL_VMW_CREATE_CONTEXT \
68 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
69 struct drm_vmw_context_arg)
70#define DRM_IOCTL_VMW_UNREF_CONTEXT \
71 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
72 struct drm_vmw_context_arg)
73#define DRM_IOCTL_VMW_CREATE_SURFACE \
74 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
75 union drm_vmw_surface_create_arg)
76#define DRM_IOCTL_VMW_UNREF_SURFACE \
77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
78 struct drm_vmw_surface_arg)
79#define DRM_IOCTL_VMW_REF_SURFACE \
80 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
81 union drm_vmw_surface_reference_arg)
82#define DRM_IOCTL_VMW_EXECBUF \
83 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
84 struct drm_vmw_execbuf_arg)
85#define DRM_IOCTL_VMW_FIFO_DEBUG \
86 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
87 struct drm_vmw_fifo_debug_arg)
88#define DRM_IOCTL_VMW_FENCE_WAIT \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
90 struct drm_vmw_fence_wait_arg)
91
92
93/**
94 * The core DRM version of this macro doesn't account for
95 * DRM_COMMAND_BASE.
96 */
97
98#define VMW_IOCTL_DEF(ioctl, func, flags) \
99 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
100
101/**
102 * Ioctl definitions.
103 */
104
105static struct drm_ioctl_desc vmw_ioctls[] = {
106 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
107 DRM_AUTH | DRM_UNLOCKED),
108 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
109 DRM_AUTH | DRM_UNLOCKED),
110 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
111 DRM_AUTH | DRM_UNLOCKED),
112 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
113 vmw_kms_cursor_bypass_ioctl,
114 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
115
116 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
119 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
120 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
121 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
122
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
124 DRM_AUTH | DRM_UNLOCKED),
125 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
126 DRM_AUTH | DRM_UNLOCKED),
127 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
128 DRM_AUTH | DRM_UNLOCKED),
129 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
130 DRM_AUTH | DRM_UNLOCKED),
131 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
132 DRM_AUTH | DRM_UNLOCKED),
133 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
134 DRM_AUTH | DRM_UNLOCKED),
135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
136 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
137 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
138 DRM_AUTH | DRM_UNLOCKED)
139};
140
141static struct pci_device_id vmw_pci_id_list[] = {
142 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
143 {0, 0, 0}
144};
145
146static char *vmw_devname = "vmwgfx";
147
148static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
149static void vmw_master_init(struct vmw_master *);
150static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
151 void *ptr);
152
153static void vmw_print_capabilities(uint32_t capabilities)
154{
155 DRM_INFO("Capabilities:\n");
156 if (capabilities & SVGA_CAP_RECT_COPY)
157 DRM_INFO(" Rect copy.\n");
158 if (capabilities & SVGA_CAP_CURSOR)
159 DRM_INFO(" Cursor.\n");
160 if (capabilities & SVGA_CAP_CURSOR_BYPASS)
161 DRM_INFO(" Cursor bypass.\n");
162 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
163 DRM_INFO(" Cursor bypass 2.\n");
164 if (capabilities & SVGA_CAP_8BIT_EMULATION)
165 DRM_INFO(" 8bit emulation.\n");
166 if (capabilities & SVGA_CAP_ALPHA_CURSOR)
167 DRM_INFO(" Alpha cursor.\n");
168 if (capabilities & SVGA_CAP_3D)
169 DRM_INFO(" 3D.\n");
170 if (capabilities & SVGA_CAP_EXTENDED_FIFO)
171 DRM_INFO(" Extended Fifo.\n");
172 if (capabilities & SVGA_CAP_MULTIMON)
173 DRM_INFO(" Multimon.\n");
174 if (capabilities & SVGA_CAP_PITCHLOCK)
175 DRM_INFO(" Pitchlock.\n");
176 if (capabilities & SVGA_CAP_IRQMASK)
177 DRM_INFO(" Irq mask.\n");
178 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
179 DRM_INFO(" Display Topology.\n");
180 if (capabilities & SVGA_CAP_GMR)
181 DRM_INFO(" GMR.\n");
182 if (capabilities & SVGA_CAP_TRACES)
183 DRM_INFO(" Traces.\n");
184}
185
186static int vmw_request_device(struct vmw_private *dev_priv)
187{
188 int ret;
189
190 vmw_kms_save_vga(dev_priv);
191
192 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
193 if (unlikely(ret != 0)) {
194 DRM_ERROR("Unable to initialize FIFO.\n");
195 return ret;
196 }
197
198 return 0;
199}
200
201static void vmw_release_device(struct vmw_private *dev_priv)
202{
203 vmw_fifo_release(dev_priv, &dev_priv->fifo);
204 vmw_kms_restore_vga(dev_priv);
205}
206
207
208static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
209{
210 struct vmw_private *dev_priv;
211 int ret;
212 uint32_t svga_id;
213
214 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
215 if (unlikely(dev_priv == NULL)) {
216 DRM_ERROR("Failed allocating a device private struct.\n");
217 return -ENOMEM;
218 }
219 memset(dev_priv, 0, sizeof(*dev_priv));
220
221 dev_priv->dev = dev;
222 dev_priv->vmw_chipset = chipset;
223 dev_priv->last_read_sequence = (uint32_t) -100;
224 mutex_init(&dev_priv->hw_mutex);
225 mutex_init(&dev_priv->cmdbuf_mutex);
226 rwlock_init(&dev_priv->resource_lock);
227 idr_init(&dev_priv->context_idr);
228 idr_init(&dev_priv->surface_idr);
229 idr_init(&dev_priv->stream_idr);
230 ida_init(&dev_priv->gmr_ida);
231 mutex_init(&dev_priv->init_mutex);
232 init_waitqueue_head(&dev_priv->fence_queue);
233 init_waitqueue_head(&dev_priv->fifo_queue);
234 atomic_set(&dev_priv->fence_queue_waiters, 0);
235 atomic_set(&dev_priv->fifo_queue_waiters, 0);
236 INIT_LIST_HEAD(&dev_priv->gmr_lru);
237
238 dev_priv->io_start = pci_resource_start(dev->pdev, 0);
239 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
240 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
241
242 mutex_lock(&dev_priv->hw_mutex);
243
244 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
245 svga_id = vmw_read(dev_priv, SVGA_REG_ID);
246 if (svga_id != SVGA_ID_2) {
247 ret = -ENOSYS;
248		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
249 mutex_unlock(&dev_priv->hw_mutex);
250 goto out_err0;
251 }
252
253 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
254
255 if (dev_priv->capabilities & SVGA_CAP_GMR) {
256 dev_priv->max_gmr_descriptors =
257 vmw_read(dev_priv,
258 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
259 dev_priv->max_gmr_ids =
260 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
261 }
262
263 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
264 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
265 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
266 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
267
268 mutex_unlock(&dev_priv->hw_mutex);
269
270 vmw_print_capabilities(dev_priv->capabilities);
271
272 if (dev_priv->capabilities & SVGA_CAP_GMR) {
273 DRM_INFO("Max GMR ids is %u\n",
274 (unsigned)dev_priv->max_gmr_ids);
275 DRM_INFO("Max GMR descriptors is %u\n",
276 (unsigned)dev_priv->max_gmr_descriptors);
277 }
278 DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
279 dev_priv->vram_start, dev_priv->vram_size / 1024);
280 DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
281 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
282
283 ret = vmw_ttm_global_init(dev_priv);
284 if (unlikely(ret != 0))
285 goto out_err0;
286
287
288 vmw_master_init(&dev_priv->fbdev_master);
289 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
290 dev_priv->active_master = &dev_priv->fbdev_master;
291
292
293 ret = ttm_bo_device_init(&dev_priv->bdev,
294 dev_priv->bo_global_ref.ref.object,
295 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
296 false);
297 if (unlikely(ret != 0)) {
298 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
299 goto out_err1;
300 }
301
302 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
303 (dev_priv->vram_size >> PAGE_SHIFT));
304 if (unlikely(ret != 0)) {
305 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
306 goto out_err2;
307 }
308
309 dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
310 dev_priv->mmio_size, DRM_MTRR_WC);
311
312 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
313 dev_priv->mmio_size);
314
315 if (unlikely(dev_priv->mmio_virt == NULL)) {
316 ret = -ENOMEM;
317 DRM_ERROR("Failed mapping MMIO.\n");
318 goto out_err3;
319 }
320
321 dev_priv->tdev = ttm_object_device_init
322 (dev_priv->mem_global_ref.object, 12);
323
324 if (unlikely(dev_priv->tdev == NULL)) {
325 DRM_ERROR("Unable to initialize TTM object management.\n");
326 ret = -ENOMEM;
327 goto out_err4;
328 }
329
330 dev->dev_private = dev_priv;
331
332 if (!dev->devname)
333 dev->devname = vmw_devname;
334
335 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
336 ret = drm_irq_install(dev);
337 if (unlikely(ret != 0)) {
338 DRM_ERROR("Failed installing irq: %d\n", ret);
339 goto out_no_irq;
340 }
341 }
342
343 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
344 dev_priv->stealth = (ret != 0);
345 if (dev_priv->stealth) {
346 /**
347 * Request at least the mmio PCI resource.
348 */
349
350		DRM_INFO("It appears that vesafb is loaded. "
351			 "Ignore the above error, if any.\n");
352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
353 if (unlikely(ret != 0)) {
354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
355 goto out_no_device;
356 }
357 }
358 ret = vmw_request_device(dev_priv);
359 if (unlikely(ret != 0))
360 goto out_no_device;
361 vmw_kms_init(dev_priv);
362 vmw_overlay_init(dev_priv);
363 vmw_fb_init(dev_priv);
364
365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
366 register_pm_notifier(&dev_priv->pm_nb);
367
368 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
369
370 return 0;
371
372out_no_device:
373 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
374 drm_irq_uninstall(dev_priv->dev);
375 if (dev->devname == vmw_devname)
376 dev->devname = NULL;
377out_no_irq:
378 ttm_object_device_release(&dev_priv->tdev);
379out_err4:
380 iounmap(dev_priv->mmio_virt);
381out_err3:
382 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
383 dev_priv->mmio_size, DRM_MTRR_WC);
384 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
385out_err2:
386 (void)ttm_bo_device_release(&dev_priv->bdev);
387out_err1:
388 vmw_ttm_global_release(dev_priv);
389out_err0:
390 ida_destroy(&dev_priv->gmr_ida);
391 idr_destroy(&dev_priv->surface_idr);
392 idr_destroy(&dev_priv->context_idr);
393 idr_destroy(&dev_priv->stream_idr);
394 kfree(dev_priv);
395 return ret;
396}
397
398static int vmw_driver_unload(struct drm_device *dev)
399{
400 struct vmw_private *dev_priv = vmw_priv(dev);
401
402 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
403
404 unregister_pm_notifier(&dev_priv->pm_nb);
405
406 vmw_fb_close(dev_priv);
407 vmw_kms_close(dev_priv);
408 vmw_overlay_close(dev_priv);
409 vmw_release_device(dev_priv);
410 if (dev_priv->stealth)
411 pci_release_region(dev->pdev, 2);
412 else
413 pci_release_regions(dev->pdev);
414
415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
416 drm_irq_uninstall(dev_priv->dev);
417 if (dev->devname == vmw_devname)
418 dev->devname = NULL;
419 ttm_object_device_release(&dev_priv->tdev);
420 iounmap(dev_priv->mmio_virt);
421 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
422 dev_priv->mmio_size, DRM_MTRR_WC);
423 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
424 (void)ttm_bo_device_release(&dev_priv->bdev);
425 vmw_ttm_global_release(dev_priv);
426 ida_destroy(&dev_priv->gmr_ida);
427 idr_destroy(&dev_priv->surface_idr);
428 idr_destroy(&dev_priv->context_idr);
429 idr_destroy(&dev_priv->stream_idr);
430
431 kfree(dev_priv);
432
433 return 0;
434}
435
436static void vmw_postclose(struct drm_device *dev,
437 struct drm_file *file_priv)
438{
439 struct vmw_fpriv *vmw_fp;
440
441 vmw_fp = vmw_fpriv(file_priv);
442 ttm_object_file_release(&vmw_fp->tfile);
443 if (vmw_fp->locked_master)
444 drm_master_put(&vmw_fp->locked_master);
445 kfree(vmw_fp);
446}
447
448static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
449{
450 struct vmw_private *dev_priv = vmw_priv(dev);
451 struct vmw_fpriv *vmw_fp;
452 int ret = -ENOMEM;
453
454 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
455 if (unlikely(vmw_fp == NULL))
456 return ret;
457
458 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
459 if (unlikely(vmw_fp->tfile == NULL))
460 goto out_no_tfile;
461
462 file_priv->driver_priv = vmw_fp;
463
464 if (unlikely(dev_priv->bdev.dev_mapping == NULL))
465 dev_priv->bdev.dev_mapping =
466 file_priv->filp->f_path.dentry->d_inode->i_mapping;
467
468 return 0;
469
470out_no_tfile:
471 kfree(vmw_fp);
472 return ret;
473}
474
475static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
476 unsigned long arg)
477{
478 struct drm_file *file_priv = filp->private_data;
479 struct drm_device *dev = file_priv->minor->dev;
480 unsigned int nr = DRM_IOCTL_NR(cmd);
481
482 /*
483 * Do extra checking on driver private ioctls.
484 */
485
486 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
487 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
488 struct drm_ioctl_desc *ioctl =
489 &vmw_ioctls[nr - DRM_COMMAND_BASE];
490
491 if (unlikely(ioctl->cmd != cmd)) {
492 DRM_ERROR("Invalid command format, ioctl %d\n",
493 nr - DRM_COMMAND_BASE);
494 return -EINVAL;
495 }
496 }
497
498 return drm_ioctl(filp, cmd, arg);
499}
500
501static int vmw_firstopen(struct drm_device *dev)
502{
503 struct vmw_private *dev_priv = vmw_priv(dev);
504 dev_priv->is_opened = true;
505
506 return 0;
507}
508
509static void vmw_lastclose(struct drm_device *dev)
510{
511 struct vmw_private *dev_priv = vmw_priv(dev);
512 struct drm_crtc *crtc;
513 struct drm_mode_set set;
514 int ret;
515
516 /**
517 * Do nothing on the lastclose call from drm_unload.
518 */
519
520 if (!dev_priv->is_opened)
521 return;
522
523 dev_priv->is_opened = false;
524 set.x = 0;
525 set.y = 0;
526 set.fb = NULL;
527 set.mode = NULL;
528 set.connectors = NULL;
529 set.num_connectors = 0;
530
531 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
532 set.crtc = crtc;
533 ret = crtc->funcs->set_config(&set);
534 WARN_ON(ret != 0);
535 }
536
537}
538
539static void vmw_master_init(struct vmw_master *vmaster)
540{
541 ttm_lock_init(&vmaster->lock);
542}
543
544static int vmw_master_create(struct drm_device *dev,
545 struct drm_master *master)
546{
547 struct vmw_master *vmaster;
548
549 DRM_INFO("Master create.\n");
550 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
551 if (unlikely(vmaster == NULL))
552 return -ENOMEM;
553
554 ttm_lock_init(&vmaster->lock);
555 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
556 master->driver_priv = vmaster;
557
558 return 0;
559}
560
561static void vmw_master_destroy(struct drm_device *dev,
562 struct drm_master *master)
563{
564 struct vmw_master *vmaster = vmw_master(master);
565
566 DRM_INFO("Master destroy.\n");
567 master->driver_priv = NULL;
568 kfree(vmaster);
569}
570
571
572static int vmw_master_set(struct drm_device *dev,
573 struct drm_file *file_priv,
574 bool from_open)
575{
576 struct vmw_private *dev_priv = vmw_priv(dev);
577 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
578 struct vmw_master *active = dev_priv->active_master;
579 struct vmw_master *vmaster = vmw_master(file_priv->master);
580 int ret = 0;
581
582 DRM_INFO("Master set.\n");
583
584 if (active) {
585 BUG_ON(active != &dev_priv->fbdev_master);
586 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
587 if (unlikely(ret != 0))
588 goto out_no_active_lock;
589
590 ttm_lock_set_kill(&active->lock, true, SIGTERM);
591 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
592 if (unlikely(ret != 0)) {
593 DRM_ERROR("Unable to clean VRAM on "
594 "master drop.\n");
595 }
596
597 dev_priv->active_master = NULL;
598 }
599
600 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
601 if (!from_open) {
602 ttm_vt_unlock(&vmaster->lock);
603 BUG_ON(vmw_fp->locked_master != file_priv->master);
604 drm_master_put(&vmw_fp->locked_master);
605 }
606
607 dev_priv->active_master = vmaster;
608
609 return 0;
610
611out_no_active_lock:
612 vmw_release_device(dev_priv);
613 return ret;
614}
615
616static void vmw_master_drop(struct drm_device *dev,
617 struct drm_file *file_priv,
618 bool from_release)
619{
620 struct vmw_private *dev_priv = vmw_priv(dev);
621 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
622 struct vmw_master *vmaster = vmw_master(file_priv->master);
623 int ret;
624
625 DRM_INFO("Master drop.\n");
626
627 /**
628 * Make sure the master doesn't disappear while we have
629 * it locked.
630 */
631
632 vmw_fp->locked_master = drm_master_get(file_priv->master);
633 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
634
635 if (unlikely((ret != 0))) {
636 DRM_ERROR("Unable to lock TTM at VT switch.\n");
637 drm_master_put(&vmw_fp->locked_master);
638 }
639
640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
641
642 dev_priv->active_master = &dev_priv->fbdev_master;
643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
645
646 vmw_fb_on(dev_priv);
647}
648
649
650static void vmw_remove(struct pci_dev *pdev)
651{
652 struct drm_device *dev = pci_get_drvdata(pdev);
653
654 drm_put_dev(dev);
655}
656
657static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
658 void *ptr)
659{
660 struct vmw_private *dev_priv =
661 container_of(nb, struct vmw_private, pm_nb);
662 struct vmw_master *vmaster = dev_priv->active_master;
663
664 switch (val) {
665 case PM_HIBERNATION_PREPARE:
666 case PM_SUSPEND_PREPARE:
667 ttm_suspend_lock(&vmaster->lock);
668
669 /**
670 * This empties VRAM and unbinds all GMR bindings.
671		 * Buffer contents are moved to swappable memory.
672 */
673 ttm_bo_swapout_all(&dev_priv->bdev);
674 break;
675 case PM_POST_HIBERNATION:
676 case PM_POST_SUSPEND:
677 ttm_suspend_unlock(&vmaster->lock);
678 break;
679 case PM_RESTORE_PREPARE:
680 break;
681 case PM_POST_RESTORE:
682 break;
683 default:
684 break;
685 }
686 return 0;
687}
688
689/**
690 * These might not be needed with the virtual SVGA device.
691 */
692
693int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
694{
695 pci_save_state(pdev);
696 pci_disable_device(pdev);
697 pci_set_power_state(pdev, PCI_D3hot);
698 return 0;
699}
700
701int vmw_pci_resume(struct pci_dev *pdev)
702{
703 pci_set_power_state(pdev, PCI_D0);
704 pci_restore_state(pdev);
705 return pci_enable_device(pdev);
706}
707
708static struct drm_driver driver = {
709 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
710 DRIVER_MODESET,
711 .load = vmw_driver_load,
712 .unload = vmw_driver_unload,
713 .firstopen = vmw_firstopen,
714 .lastclose = vmw_lastclose,
715 .irq_preinstall = vmw_irq_preinstall,
716 .irq_postinstall = vmw_irq_postinstall,
717 .irq_uninstall = vmw_irq_uninstall,
718 .irq_handler = vmw_irq_handler,
719 .reclaim_buffers_locked = NULL,
720 .get_map_ofs = drm_core_get_map_ofs,
721 .get_reg_ofs = drm_core_get_reg_ofs,
722 .ioctls = vmw_ioctls,
723 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
724 .dma_quiescent = NULL, /*vmw_dma_quiescent, */
725 .master_create = vmw_master_create,
726 .master_destroy = vmw_master_destroy,
727 .master_set = vmw_master_set,
728 .master_drop = vmw_master_drop,
729 .open = vmw_driver_open,
730 .postclose = vmw_postclose,
731 .fops = {
732 .owner = THIS_MODULE,
733 .open = drm_open,
734 .release = drm_release,
735 .unlocked_ioctl = vmw_unlocked_ioctl,
736 .mmap = vmw_mmap,
737 .poll = drm_poll,
738 .fasync = drm_fasync,
739#if defined(CONFIG_COMPAT)
740 .compat_ioctl = drm_compat_ioctl,
741#endif
742 },
743 .pci_driver = {
744 .name = VMWGFX_DRIVER_NAME,
745 .id_table = vmw_pci_id_list,
746 .probe = vmw_probe,
747 .remove = vmw_remove,
748 .suspend = vmw_pci_suspend,
749 .resume = vmw_pci_resume
750 },
751 .name = VMWGFX_DRIVER_NAME,
752 .desc = VMWGFX_DRIVER_DESC,
753 .date = VMWGFX_DRIVER_DATE,
754 .major = VMWGFX_DRIVER_MAJOR,
755 .minor = VMWGFX_DRIVER_MINOR,
756 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
757};
758
759static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
760{
761 return drm_get_dev(pdev, ent, &driver);
762}
763
764static int __init vmwgfx_init(void)
765{
766 int ret;
767 ret = drm_init(&driver);
768 if (ret)
769 DRM_ERROR("Failed initializing DRM.\n");
770 return ret;
771}
772
773static void __exit vmwgfx_exit(void)
774{
775 drm_exit(&driver);
776}
777
778module_init(vmwgfx_init);
779module_exit(vmwgfx_exit);
780
781MODULE_AUTHOR("VMware Inc. and others");
782MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
783MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 000000000000..356dc935ec13
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,521 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef _VMWGFX_DRV_H_
29#define _VMWGFX_DRV_H_
30
31#include "vmwgfx_reg.h"
32#include "drmP.h"
33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h"
35#include "linux/suspend.h"
36#include "ttm/ttm_bo_driver.h"
37#include "ttm/ttm_object.h"
38#include "ttm/ttm_lock.h"
39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h"
41
42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 0
45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048
49#define VMWGFX_MAX_GMRS 2048
50
51struct vmw_fpriv {
52 struct drm_master *locked_master;
53 struct ttm_object_file *tfile;
54};
55
56struct vmw_dma_buffer {
57 struct ttm_buffer_object base;
58 struct list_head validate_list;
59 struct list_head gmr_lru;
60 uint32_t gmr_id;
61 bool gmr_bound;
62 uint32_t cur_validate_node;
63 bool on_validate_list;
64};
65
66struct vmw_resource {
67 struct kref kref;
68 struct vmw_private *dev_priv;
69 struct idr *idr;
70 int id;
71 enum ttm_object_type res_type;
72 bool avail;
73 void (*hw_destroy) (struct vmw_resource *res);
74 void (*res_free) (struct vmw_resource *res);
75
76 /* TODO is a generic snooper needed? */
77#if 0
78 void (*snoop)(struct vmw_resource *res,
79 struct ttm_object_file *tfile,
80 SVGA3dCmdHeader *header);
81 void *snoop_priv;
82#endif
83};
84
85struct vmw_cursor_snooper {
86 struct drm_crtc *crtc;
87 size_t age;
88 uint32_t *image;
89};
90
91struct vmw_surface {
92 struct vmw_resource res;
93 uint32_t flags;
94 uint32_t format;
95 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
96 struct drm_vmw_size *sizes;
97 uint32_t num_sizes;
98
99 bool scanout;
100
101	/* TODO so far just an extra pointer */
102 struct vmw_cursor_snooper snooper;
103};
104
105struct vmw_fifo_state {
106 unsigned long reserved_size;
107 __le32 *dynamic_buffer;
108 __le32 *static_buffer;
109 __le32 *last_buffer;
110 uint32_t last_data_size;
111 uint32_t last_buffer_size;
112 bool last_buffer_add;
113 unsigned long static_buffer_size;
114 bool using_bounce_buffer;
115 uint32_t capabilities;
116 struct mutex fifo_mutex;
117 struct rw_semaphore rwsem;
118};
119
120struct vmw_relocation {
121 SVGAGuestPtr *location;
122 uint32_t index;
123};
124
125struct vmw_sw_context{
126 struct ida bo_list;
127 uint32_t last_cid;
128 bool cid_valid;
129 uint32_t last_sid;
130 uint32_t sid_translation;
131 bool sid_valid;
132 struct ttm_object_file *tfile;
133 struct list_head validate_nodes;
134 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
135 uint32_t cur_reloc;
136 struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
137 uint32_t cur_val_buf;
138};
139
140struct vmw_legacy_display;
141struct vmw_overlay;
142
143struct vmw_master {
144 struct ttm_lock lock;
145};
146
147struct vmw_private {
148 struct ttm_bo_device bdev;
149 struct ttm_bo_global_ref bo_global_ref;
150 struct ttm_global_reference mem_global_ref;
151
152 struct vmw_fifo_state fifo;
153
154 struct drm_device *dev;
155 unsigned long vmw_chipset;
156 unsigned int io_start;
157 uint32_t vram_start;
158 uint32_t vram_size;
159 uint32_t mmio_start;
160 uint32_t mmio_size;
161 uint32_t fb_max_width;
162 uint32_t fb_max_height;
163 __le32 __iomem *mmio_virt;
164 int mmio_mtrr;
165 uint32_t capabilities;
166 uint32_t max_gmr_descriptors;
167 uint32_t max_gmr_ids;
168 struct mutex hw_mutex;
169
170 /*
171 * VGA registers.
172 */
173
174 uint32_t vga_width;
175 uint32_t vga_height;
176 uint32_t vga_depth;
177 uint32_t vga_bpp;
178 uint32_t vga_pseudo;
179 uint32_t vga_red_mask;
180 uint32_t vga_blue_mask;
181 uint32_t vga_green_mask;
182
183 /*
184 * Framebuffer info.
185 */
186
187 void *fb_info;
188 struct vmw_legacy_display *ldu_priv;
189 struct vmw_overlay *overlay_priv;
190
191 /*
192 * Context and surface management.
193 */
194
195 rwlock_t resource_lock;
196 struct idr context_idr;
197 struct idr surface_idr;
198 struct idr stream_idr;
199
200 /*
201 * Block lastclose from racing with firstopen.
202 */
203
204 struct mutex init_mutex;
205
206 /*
207 * A resource manager for kernel-only surfaces and
208 * contexts.
209 */
210
211 struct ttm_object_device *tdev;
212
213 /*
214 * Fencing and IRQs.
215 */
216
217 atomic_t fence_seq;
218 wait_queue_head_t fence_queue;
219 wait_queue_head_t fifo_queue;
220 atomic_t fence_queue_waiters;
221 atomic_t fifo_queue_waiters;
222 uint32_t last_read_sequence;
223 spinlock_t irq_lock;
224
225 /*
226 * Device state
227 */
228
229 uint32_t traces_state;
230 uint32_t enable_state;
231 uint32_t config_done_state;
232
233 /**
234 * Execbuf
235 */
236 /**
237 * Protected by the cmdbuf mutex.
238 */
239
240 struct vmw_sw_context ctx;
241 uint32_t val_seq;
242 struct mutex cmdbuf_mutex;
243
244 /**
245 * GMR management. Protected by the lru spinlock.
246 */
247
248 struct ida gmr_ida;
249 struct list_head gmr_lru;
250
251
252 /**
253 * Operating mode.
254 */
255
256 bool stealth;
257 bool is_opened;
258
259 /**
260 * Master management.
261 */
262
263 struct vmw_master *active_master;
264 struct vmw_master fbdev_master;
265 struct notifier_block pm_nb;
266};
267
268static inline struct vmw_private *vmw_priv(struct drm_device *dev)
269{
270 return (struct vmw_private *)dev->dev_private;
271}
272
273static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
274{
275 return (struct vmw_fpriv *)file_priv->driver_priv;
276}
277
278static inline struct vmw_master *vmw_master(struct drm_master *master)
279{
280 return (struct vmw_master *) master->driver_priv;
281}
282
283static inline void vmw_write(struct vmw_private *dev_priv,
284 unsigned int offset, uint32_t value)
285{
286 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
287 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
288}
289
290static inline uint32_t vmw_read(struct vmw_private *dev_priv,
291 unsigned int offset)
292{
293 uint32_t val;
294
295 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
296 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
297 return val;
298}
299
300/**
301 * GMR utilities - vmwgfx_gmr.c
302 */
303
304extern int vmw_gmr_bind(struct vmw_private *dev_priv,
305 struct ttm_buffer_object *bo);
306extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
307
308/**
309 * Resource utilities - vmwgfx_resource.c
310 */
311
312extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
313extern void vmw_resource_unreference(struct vmw_resource **p_res);
314extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
315extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
316 struct drm_file *file_priv);
317extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
318 struct drm_file *file_priv);
319extern int vmw_context_check(struct vmw_private *dev_priv,
320 struct ttm_object_file *tfile,
321 int id);
322extern void vmw_surface_res_free(struct vmw_resource *res);
323extern int vmw_surface_init(struct vmw_private *dev_priv,
324 struct vmw_surface *srf,
325 void (*res_free) (struct vmw_resource *res));
326extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
327 struct ttm_object_file *tfile,
328 uint32_t handle,
329 struct vmw_surface **out);
330extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
331 struct drm_file *file_priv);
332extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
333 struct drm_file *file_priv);
334extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
335 struct drm_file *file_priv);
336extern int vmw_surface_check(struct vmw_private *dev_priv,
337 struct ttm_object_file *tfile,
338 uint32_t handle, int *id);
339extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
340extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
341 struct vmw_dma_buffer *vmw_bo,
342 size_t size, struct ttm_placement *placement,
343			   bool interruptible,
344 void (*bo_free) (struct ttm_buffer_object *bo));
345extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
346 struct drm_file *file_priv);
347extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
348 struct drm_file *file_priv);
349extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
350 uint32_t cur_validate_node);
351extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
352extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
353 uint32_t id, struct vmw_dma_buffer **out);
354extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
355extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
356extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
357extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
358 struct vmw_dma_buffer *bo);
359extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
360 struct vmw_dma_buffer *bo);
361extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
362extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
363 struct drm_file *file_priv);
364extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
365 struct drm_file *file_priv);
366extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
367 struct ttm_object_file *tfile,
368 uint32_t *inout_id,
369 struct vmw_resource **out);
370
371
372/**
373 * Misc Ioctl functionality - vmwgfx_ioctl.c
374 */
375
376extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
377 struct drm_file *file_priv);
378extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
379 struct drm_file *file_priv);
380
381/**
382 * Fifo utilities - vmwgfx_fifo.c
383 */
384
385extern int vmw_fifo_init(struct vmw_private *dev_priv,
386 struct vmw_fifo_state *fifo);
387extern void vmw_fifo_release(struct vmw_private *dev_priv,
388 struct vmw_fifo_state *fifo);
389extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
390extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
391extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
392 uint32_t *sequence);
393extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
394extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
395extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
396
397/**
398 * TTM glue - vmwgfx_ttm_glue.c
399 */
400
401extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
402extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
403extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
404
405/**
406 * TTM buffer object driver - vmwgfx_buffer.c
407 */
408
409extern struct ttm_placement vmw_vram_placement;
410extern struct ttm_placement vmw_vram_ne_placement;
411extern struct ttm_placement vmw_vram_sys_placement;
412extern struct ttm_placement vmw_sys_placement;
413extern struct ttm_bo_driver vmw_bo_driver;
414extern int vmw_dma_quiescent(struct drm_device *dev);
415
416/**
417 * Command submission - vmwgfx_execbuf.c
418 */
419
420extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
421 struct drm_file *file_priv);
422
423/**
424 * IRQs and waiting - vmwgfx_irq.c
425 */
426
427extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
428extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
429 uint32_t sequence, bool interruptible,
430 unsigned long timeout);
431extern void vmw_irq_preinstall(struct drm_device *dev);
432extern int vmw_irq_postinstall(struct drm_device *dev);
433extern void vmw_irq_uninstall(struct drm_device *dev);
434extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
435 uint32_t sequence);
436extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
437 struct drm_file *file_priv);
438extern int vmw_fallback_wait(struct vmw_private *dev_priv,
439 bool lazy,
440 bool fifo_idle,
441 uint32_t sequence,
442 bool interruptible,
443 unsigned long timeout);
444
445/**
446 * Kernel framebuffer - vmwgfx_fb.c
447 */
448
449int vmw_fb_init(struct vmw_private *vmw_priv);
450int vmw_fb_close(struct vmw_private *dev_priv);
451int vmw_fb_off(struct vmw_private *vmw_priv);
452int vmw_fb_on(struct vmw_private *vmw_priv);
453
454/**
455 * Kernel modesetting - vmwgfx_kms.c
456 */
457
458int vmw_kms_init(struct vmw_private *dev_priv);
459int vmw_kms_close(struct vmw_private *dev_priv);
460int vmw_kms_save_vga(struct vmw_private *vmw_priv);
461int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
462int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
463 struct drm_file *file_priv);
464void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
465void vmw_kms_cursor_snoop(struct vmw_surface *srf,
466 struct ttm_object_file *tfile,
467 struct ttm_buffer_object *bo,
468 SVGA3dCmdHeader *header);
469
470/**
471 * Overlay control - vmwgfx_overlay.c
472 */
473
474int vmw_overlay_init(struct vmw_private *dev_priv);
475int vmw_overlay_close(struct vmw_private *dev_priv);
476int vmw_overlay_ioctl(struct drm_device *dev, void *data,
477 struct drm_file *file_priv);
478int vmw_overlay_stop_all(struct vmw_private *dev_priv);
479int vmw_overlay_resume_all(struct vmw_private *dev_priv);
480int vmw_overlay_pause_all(struct vmw_private *dev_priv);
481int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
482int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
483int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
484int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
485
486/**
487 * Inline helper functions
488 */
489
490static inline void vmw_surface_unreference(struct vmw_surface **srf)
491{
492 struct vmw_surface *tmp_srf = *srf;
493 struct vmw_resource *res = &tmp_srf->res;
494 *srf = NULL;
495
496 vmw_resource_unreference(&res);
497}
498
499static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
500{
501 (void) vmw_resource_reference(&srf->res);
502 return srf;
503}
504
505static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
506{
507 struct vmw_dma_buffer *tmp_buf = *buf;
508 struct ttm_buffer_object *bo = &tmp_buf->base;
509 *buf = NULL;
510
511 ttm_bo_unref(&bo);
512}
513
514static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
515{
516 if (ttm_bo_reference(&buf->base))
517 return buf;
518 return NULL;
519}
520
521#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 000000000000..0897359b3e4e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,716 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include "ttm/ttm_bo_api.h"
31#include "ttm/ttm_placement.h"
32
33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header)
36{
37 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
38}
39
40static int vmw_cmd_ok(struct vmw_private *dev_priv,
41 struct vmw_sw_context *sw_context,
42 SVGA3dCmdHeader *header)
43{
44 return 0;
45}
46
47static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
48 struct vmw_sw_context *sw_context,
49 SVGA3dCmdHeader *header)
50{
51 struct vmw_cid_cmd {
52 SVGA3dCmdHeader header;
53 __le32 cid;
54 } *cmd;
55 int ret;
56
57 cmd = container_of(header, struct vmw_cid_cmd, header);
58 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
59 return 0;
60
61 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
62 if (unlikely(ret != 0)) {
63 DRM_ERROR("Could not find or use context %u\n",
64 (unsigned) cmd->cid);
65 return ret;
66 }
67
68 sw_context->last_cid = cmd->cid;
69 sw_context->cid_valid = true;
70
71 return 0;
72}
73
74static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
75 struct vmw_sw_context *sw_context,
76 uint32_t *sid)
77{
78 if (*sid == SVGA3D_INVALID_ID)
79 return 0;
80
81 if (unlikely((!sw_context->sid_valid ||
82 *sid != sw_context->last_sid))) {
83 int real_id;
84 int ret = vmw_surface_check(dev_priv, sw_context->tfile,
85 *sid, &real_id);
86
87 if (unlikely(ret != 0)) {
88			DRM_ERROR("Could not find or use surface 0x%08x "
89 "address 0x%08lx\n",
90 (unsigned int) *sid,
91 (unsigned long) sid);
92 return ret;
93 }
94
95 sw_context->last_sid = *sid;
96 sw_context->sid_valid = true;
97 *sid = real_id;
98 sw_context->sid_translation = real_id;
99 } else
100 *sid = sw_context->sid_translation;
101
102 return 0;
103}
104
105
106static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
107 struct vmw_sw_context *sw_context,
108 SVGA3dCmdHeader *header)
109{
110 struct vmw_sid_cmd {
111 SVGA3dCmdHeader header;
112 SVGA3dCmdSetRenderTarget body;
113 } *cmd;
114 int ret;
115
116 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
117 if (unlikely(ret != 0))
118 return ret;
119
120 cmd = container_of(header, struct vmw_sid_cmd, header);
121 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
122 return ret;
123}
124
125static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
126 struct vmw_sw_context *sw_context,
127 SVGA3dCmdHeader *header)
128{
129 struct vmw_sid_cmd {
130 SVGA3dCmdHeader header;
131 SVGA3dCmdSurfaceCopy body;
132 } *cmd;
133 int ret;
134
135 cmd = container_of(header, struct vmw_sid_cmd, header);
136 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
137 if (unlikely(ret != 0))
138 return ret;
139 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
140}
141
142static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
143 struct vmw_sw_context *sw_context,
144 SVGA3dCmdHeader *header)
145{
146 struct vmw_sid_cmd {
147 SVGA3dCmdHeader header;
148 SVGA3dCmdSurfaceStretchBlt body;
149 } *cmd;
150 int ret;
151
152 cmd = container_of(header, struct vmw_sid_cmd, header);
153 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
154 if (unlikely(ret != 0))
155 return ret;
156 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
157}
158
159static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
160 struct vmw_sw_context *sw_context,
161 SVGA3dCmdHeader *header)
162{
163 struct vmw_sid_cmd {
164 SVGA3dCmdHeader header;
165 SVGA3dCmdBlitSurfaceToScreen body;
166 } *cmd;
167
168 cmd = container_of(header, struct vmw_sid_cmd, header);
169 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
170}
171
172static int vmw_cmd_present_check(struct vmw_private *dev_priv,
173 struct vmw_sw_context *sw_context,
174 SVGA3dCmdHeader *header)
175{
176 struct vmw_sid_cmd {
177 SVGA3dCmdHeader header;
178 SVGA3dCmdPresent body;
179 } *cmd;
180
181 cmd = container_of(header, struct vmw_sid_cmd, header);
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183}
184
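/*
 * Look up the DMA buffer behind a guest pointer, record a relocation
 * so its GMR id and offset can be patched in later, and add the buffer
 * to the validation list if it is not there already.
 */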
185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context,
187 SVGAGuestPtr *ptr,
188 struct vmw_dma_buffer **vmw_bo_p)
189{
190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo;
192 uint32_t handle = ptr->gmrId;
193 struct vmw_relocation *reloc;
194 uint32_t cur_validate_node;
195 struct ttm_validate_buffer *val_buf;
196 int ret;
197
198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
199 if (unlikely(ret != 0)) {
200 DRM_ERROR("Could not find or use GMR region.\n");
201 return -EINVAL;
202 }
203 bo = &vmw_bo->base;
204
205 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 206		DRM_ERROR("Max number of relocations per submission"
207 " exceeded\n");
208 ret = -EINVAL;
209 goto out_no_reloc;
210 }
211
212 reloc = &sw_context->relocs[sw_context->cur_reloc++];
213 reloc->location = ptr;
214
215 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
216 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
217 DRM_ERROR("Max number of DMA buffers per submission"
218 " exceeded.\n");
219 ret = -EINVAL;
220 goto out_no_reloc;
221 }
222
223 reloc->index = cur_validate_node;
224 if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
225 val_buf = &sw_context->val_bufs[cur_validate_node];
226 val_buf->bo = ttm_bo_reference(bo);
227 val_buf->new_sync_obj_arg = (void *) dev_priv;
228 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
229 ++sw_context->cur_val_buf;
230 }
231 *vmw_bo_p = vmw_bo;
232 return 0;
233
234out_no_reloc:
235 vmw_dmabuf_unreference(&vmw_bo);
 236	*vmw_bo_p = NULL;
237 return ret;
238}
239
240static int vmw_cmd_end_query(struct vmw_private *dev_priv,
241 struct vmw_sw_context *sw_context,
242 SVGA3dCmdHeader *header)
243{
244 struct vmw_dma_buffer *vmw_bo;
245 struct vmw_query_cmd {
246 SVGA3dCmdHeader header;
247 SVGA3dCmdEndQuery q;
248 } *cmd;
249 int ret;
250
251 cmd = container_of(header, struct vmw_query_cmd, header);
252 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
253 if (unlikely(ret != 0))
254 return ret;
255
256 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
257 &cmd->q.guestResult,
258 &vmw_bo);
259 if (unlikely(ret != 0))
260 return ret;
261
262 vmw_dmabuf_unreference(&vmw_bo);
263 return 0;
264}
265
266static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
267 struct vmw_sw_context *sw_context,
268 SVGA3dCmdHeader *header)
269{
270 struct vmw_dma_buffer *vmw_bo;
271 struct vmw_query_cmd {
272 SVGA3dCmdHeader header;
273 SVGA3dCmdWaitForQuery q;
274 } *cmd;
275 int ret;
276
277 cmd = container_of(header, struct vmw_query_cmd, header);
278 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
279 if (unlikely(ret != 0))
280 return ret;
281
282 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
283 &cmd->q.guestResult,
284 &vmw_bo);
285 if (unlikely(ret != 0))
286 return ret;
287
288 vmw_dmabuf_unreference(&vmw_bo);
289 return 0;
290}
291
292
293static int vmw_cmd_dma(struct vmw_private *dev_priv,
294 struct vmw_sw_context *sw_context,
295 SVGA3dCmdHeader *header)
296{
297 struct vmw_dma_buffer *vmw_bo = NULL;
298 struct ttm_buffer_object *bo;
299 struct vmw_surface *srf = NULL;
300 struct vmw_dma_cmd {
301 SVGA3dCmdHeader header;
302 SVGA3dCmdSurfaceDMA dma;
303 } *cmd;
304 int ret;
305
306 cmd = container_of(header, struct vmw_dma_cmd, header);
307 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
308 &cmd->dma.guest.ptr,
309 &vmw_bo);
310 if (unlikely(ret != 0))
311 return ret;
312
313 bo = &vmw_bo->base;
314 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
315 cmd->dma.host.sid, &srf);
316 if (ret) {
317 DRM_ERROR("could not find surface\n");
318 goto out_no_reloc;
319 }
320
321 /**
322 * Patch command stream with device SID.
323 */
324
325 cmd->dma.host.sid = srf->res.id;
326 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
327 /**
328 * FIXME: May deadlock here when called from the
329 * command parsing code.
330 */
331 vmw_surface_unreference(&srf);
332
333out_no_reloc:
334 vmw_dmabuf_unreference(&vmw_bo);
335 return ret;
336}
337
338static int vmw_cmd_draw(struct vmw_private *dev_priv,
339 struct vmw_sw_context *sw_context,
340 SVGA3dCmdHeader *header)
341{
342 struct vmw_draw_cmd {
343 SVGA3dCmdHeader header;
344 SVGA3dCmdDrawPrimitives body;
345 } *cmd;
346 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
347 (unsigned long)header + sizeof(*cmd));
348 SVGA3dPrimitiveRange *range;
349 uint32_t i;
350 uint32_t maxnum;
351 int ret;
352
353 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
354 if (unlikely(ret != 0))
355 return ret;
356
357 cmd = container_of(header, struct vmw_draw_cmd, header);
358 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
359
360 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
361 DRM_ERROR("Illegal number of vertex declarations.\n");
362 return -EINVAL;
363 }
364
365 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
366 ret = vmw_cmd_sid_check(dev_priv, sw_context,
367 &decl->array.surfaceId);
368 if (unlikely(ret != 0))
369 return ret;
370 }
371
372 maxnum = (header->size - sizeof(cmd->body) -
373 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
374 if (unlikely(cmd->body.numRanges > maxnum)) {
375 DRM_ERROR("Illegal number of index ranges.\n");
376 return -EINVAL;
377 }
378
379 range = (SVGA3dPrimitiveRange *) decl;
380 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
381 ret = vmw_cmd_sid_check(dev_priv, sw_context,
382 &range->indexArray.surfaceId);
383 if (unlikely(ret != 0))
384 return ret;
385 }
386 return 0;
387}
388
389
390static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
391 struct vmw_sw_context *sw_context,
392 SVGA3dCmdHeader *header)
393{
394 struct vmw_tex_state_cmd {
395 SVGA3dCmdHeader header;
396 SVGA3dCmdSetTextureState state;
397 };
398
 399	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 400		((unsigned long) header + header->size + sizeof(*header));
401 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
402 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
403 int ret;
404
405 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
406 if (unlikely(ret != 0))
407 return ret;
408
409 for (; cur_state < last_state; ++cur_state) {
410 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
411 continue;
412
413 ret = vmw_cmd_sid_check(dev_priv, sw_context,
414 &cur_state->value);
415 if (unlikely(ret != 0))
416 return ret;
417 }
418
419 return 0;
420}
421
422
423typedef int (*vmw_cmd_func) (struct vmw_private *,
424 struct vmw_sw_context *,
425 SVGA3dCmdHeader *);
426
427#define VMW_CMD_DEF(cmd, func) \
428 [cmd - SVGA_3D_CMD_BASE] = func
429
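/*
 * Dispatch table indexed by (command id - SVGA_3D_CMD_BASE). Each
 * entry is a designated initializer, so, for example,
 * VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check) expands to
 * [SVGA_3D_CMD_CLEAR - SVGA_3D_CMD_BASE] = &vmw_cmd_cid_check.
 */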
430static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
431 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
432 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
433 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
434 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
435 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
436 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
437 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
438 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
439 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
440 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
441 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
442 &vmw_cmd_set_render_target_check),
443 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
444 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
445 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
446 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
447 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
448 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
449 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
450 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
451 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
452 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
453 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
454 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
455 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
456 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
457 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
458 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
459 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
460 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
461 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
462 &vmw_cmd_blt_surf_screen_check)
463};
464
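/*
 * Verify a single command: a plain SVGA_CMD_UPDATE passes through with
 * its fixed five-word size, while all other commands are bounds-checked
 * against the remaining buffer and dispatched through vmw_cmd_funcs.
 */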
465static int vmw_cmd_check(struct vmw_private *dev_priv,
466 struct vmw_sw_context *sw_context,
467 void *buf, uint32_t *size)
468{
469 uint32_t cmd_id;
470 uint32_t size_remaining = *size;
471 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
472 int ret;
473
474 cmd_id = ((uint32_t *)buf)[0];
475 if (cmd_id == SVGA_CMD_UPDATE) {
476 *size = 5 << 2;
477 return 0;
478 }
479
480 cmd_id = le32_to_cpu(header->id);
481 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
482
483 cmd_id -= SVGA_3D_CMD_BASE;
484 if (unlikely(*size > size_remaining))
485 goto out_err;
486
487 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
488 goto out_err;
489
490 ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
491 if (unlikely(ret != 0))
492 goto out_err;
493
494 return 0;
495out_err:
496 DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
497 cmd_id + SVGA_3D_CMD_BASE);
498 return -EINVAL;
499}
500
501static int vmw_cmd_check_all(struct vmw_private *dev_priv,
502 struct vmw_sw_context *sw_context,
503 void *buf, uint32_t size)
504{
505 int32_t cur_size = size;
506 int ret;
507
508 while (cur_size > 0) {
509 size = cur_size;
510 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
511 if (unlikely(ret != 0))
512 return ret;
513 buf = (void *)((unsigned long) buf + size);
514 cur_size -= size;
515 }
516
517 if (unlikely(cur_size != 0)) {
518 DRM_ERROR("Command verifier out of sync.\n");
519 return -EINVAL;
520 }
521
522 return 0;
523}
524
525static void vmw_free_relocations(struct vmw_sw_context *sw_context)
526{
527 sw_context->cur_reloc = 0;
528}
529
530static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
531{
532 uint32_t i;
533 struct vmw_relocation *reloc;
534 struct ttm_validate_buffer *validate;
535 struct ttm_buffer_object *bo;
536
537 for (i = 0; i < sw_context->cur_reloc; ++i) {
538 reloc = &sw_context->relocs[i];
539 validate = &sw_context->val_bufs[reloc->index];
540 bo = validate->bo;
541 reloc->location->offset += bo->offset;
542 reloc->location->gmrId = vmw_dmabuf_gmr(bo);
543 }
544 vmw_free_relocations(sw_context);
545}
546
547static void vmw_clear_validations(struct vmw_sw_context *sw_context)
548{
549 struct ttm_validate_buffer *entry, *next;
550
551 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
552 head) {
553 list_del(&entry->head);
554 vmw_dmabuf_validate_clear(entry->bo);
555 ttm_bo_unref(&entry->bo);
556 sw_context->cur_val_buf--;
557 }
558 BUG_ON(sw_context->cur_val_buf != 0);
559}
560
561static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
562 struct ttm_buffer_object *bo)
563{
564 int ret;
565
566 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
567 return 0;
568
569 /**
570 * Put BO in VRAM, only if there is space.
571 */
572
573 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
574 if (unlikely(ret == -ERESTARTSYS))
575 return ret;
576
577 /**
578 * Otherwise, set it up as GMR.
579 */
580
581 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
582 return 0;
583
584 ret = vmw_gmr_bind(dev_priv, bo);
585 if (likely(ret == 0 || ret == -ERESTARTSYS))
586 return ret;
587
588 /**
589 * If that failed, try VRAM again, this time evicting
590 * previous contents.
591 */
592
593 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
594 return ret;
595}
596
597
598static int vmw_validate_buffers(struct vmw_private *dev_priv,
599 struct vmw_sw_context *sw_context)
600{
601 struct ttm_validate_buffer *entry;
602 int ret;
603
604 list_for_each_entry(entry, &sw_context->validate_nodes, head) {
605 ret = vmw_validate_single_buffer(dev_priv, entry->bo);
606 if (unlikely(ret != 0))
607 return ret;
608 }
609 return 0;
610}
611
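/*
 * Execbuf ioctl: copy the user-space command stream into reserved fifo
 * space, verify and patch it, reserve and validate all referenced
 * buffers, then commit the commands and fence the submission.
 */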
612int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
613 struct drm_file *file_priv)
614{
615 struct vmw_private *dev_priv = vmw_priv(dev);
616 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
617 struct drm_vmw_fence_rep fence_rep;
618 struct drm_vmw_fence_rep __user *user_fence_rep;
619 int ret;
 620	void __user *user_cmd;
621 void *cmd;
622 uint32_t sequence;
623 struct vmw_sw_context *sw_context = &dev_priv->ctx;
624 struct vmw_master *vmaster = vmw_master(file_priv->master);
625
626 ret = ttm_read_lock(&vmaster->lock, true);
627 if (unlikely(ret != 0))
628 return ret;
629
630 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
631 if (unlikely(ret != 0)) {
632 ret = -ERESTARTSYS;
633 goto out_no_cmd_mutex;
634 }
635
636 cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
637 if (unlikely(cmd == NULL)) {
638 DRM_ERROR("Failed reserving fifo space for commands.\n");
639 ret = -ENOMEM;
640 goto out_unlock;
641 }
642
643 user_cmd = (void __user *)(unsigned long)arg->commands;
644 ret = copy_from_user(cmd, user_cmd, arg->command_size);
645
646 if (unlikely(ret != 0)) {
647 DRM_ERROR("Failed copying commands.\n");
648 goto out_commit;
649 }
650
651 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
652 sw_context->cid_valid = false;
653 sw_context->sid_valid = false;
654 sw_context->cur_reloc = 0;
655 sw_context->cur_val_buf = 0;
656
657 INIT_LIST_HEAD(&sw_context->validate_nodes);
658
659 ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
660 if (unlikely(ret != 0))
661 goto out_err;
662 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
663 dev_priv->val_seq++);
664 if (unlikely(ret != 0))
665 goto out_err;
666
667 ret = vmw_validate_buffers(dev_priv, sw_context);
668 if (unlikely(ret != 0))
669 goto out_err;
670
671 vmw_apply_relocations(sw_context);
672 vmw_fifo_commit(dev_priv, arg->command_size);
673
674 ret = vmw_fifo_send_fence(dev_priv, &sequence);
675
676 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
677 (void *)(unsigned long) sequence);
678 vmw_clear_validations(sw_context);
679 mutex_unlock(&dev_priv->cmdbuf_mutex);
680
681 /*
682 * This error is harmless, because if fence submission fails,
683 * vmw_fifo_send_fence will sync.
684 */
685
686 if (ret != 0)
687 DRM_ERROR("Fence submission error. Syncing.\n");
688
689 fence_rep.error = ret;
690 fence_rep.fence_seq = (uint64_t) sequence;
691
692 user_fence_rep = (struct drm_vmw_fence_rep __user *)
693 (unsigned long)arg->fence_rep;
694
695 /*
696 * copy_to_user errors will be detected by user space not
697 * seeing fence_rep::error filled in.
698 */
699
700 ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
701
702 vmw_kms_cursor_post_execbuf(dev_priv);
703 ttm_read_unlock(&vmaster->lock);
704 return 0;
705out_err:
706 vmw_free_relocations(sw_context);
707 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
708 vmw_clear_validations(sw_context);
709out_commit:
710 vmw_fifo_commit(dev_priv, 0);
711out_unlock:
712 mutex_unlock(&dev_priv->cmdbuf_mutex);
713out_no_cmd_mutex:
714 ttm_read_unlock(&vmaster->lock);
715 return ret;
716}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 000000000000..a93367041cdc
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,737 @@
1/**************************************************************************
2 *
3 * Copyright © 2007 David Airlie
4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#define VMW_DIRTY_DELAY (HZ / 30)
35
36struct vmw_fb_par {
37 struct vmw_private *vmw_priv;
38
39 void *vmalloc;
40
41 struct vmw_dma_buffer *vmw_bo;
42 struct ttm_bo_kmap_obj map;
43
44 u32 pseudo_palette[17];
45
46 unsigned depth;
47 unsigned bpp;
48
49 unsigned max_width;
50 unsigned max_height;
51
52 void *bo_ptr;
53 unsigned bo_size;
54 bool bo_iowrite;
55
56 struct {
57 spinlock_t lock;
58 bool active;
59 unsigned x1;
60 unsigned y1;
61 unsigned x2;
62 unsigned y2;
63 } dirty;
64};
65
66static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
67 unsigned blue, unsigned transp,
68 struct fb_info *info)
69{
70 struct vmw_fb_par *par = info->par;
71 u32 *pal = par->pseudo_palette;
72
73 if (regno > 15) {
74 DRM_ERROR("Bad regno %u.\n", regno);
75 return 1;
76 }
77
78 switch (par->depth) {
79 case 24:
80 case 32:
81 pal[regno] = ((red & 0xff00) << 8) |
82 (green & 0xff00) |
83 ((blue & 0xff00) >> 8);
84 break;
85 default:
86 DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
87 return 1;
88 }
89
90 return 0;
91}
92
93static int vmw_fb_check_var(struct fb_var_screeninfo *var,
94 struct fb_info *info)
95{
96 int depth = var->bits_per_pixel;
97 struct vmw_fb_par *par = info->par;
98 struct vmw_private *vmw_priv = par->vmw_priv;
99
100 switch (var->bits_per_pixel) {
101 case 32:
102 depth = (var->transp.length > 0) ? 32 : 24;
103 break;
104 default:
105 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
106 return -EINVAL;
107 }
108
109 switch (depth) {
110 case 24:
111 var->red.offset = 16;
112 var->green.offset = 8;
113 var->blue.offset = 0;
114 var->red.length = 8;
115 var->green.length = 8;
116 var->blue.length = 8;
117 var->transp.length = 0;
118 var->transp.offset = 0;
119 break;
120 case 32:
121 var->red.offset = 16;
122 var->green.offset = 8;
123 var->blue.offset = 0;
124 var->red.length = 8;
125 var->green.length = 8;
126 var->blue.length = 8;
127 var->transp.length = 8;
128 var->transp.offset = 24;
129 break;
130 default:
131 DRM_ERROR("Bad depth %u.\n", depth);
132 return -EINVAL;
133 }
134
 135	/* Without multimon it's hard to resize. */
136 if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
137 (var->xres != par->max_width ||
138 var->yres != par->max_height)) {
139 DRM_ERROR("Tried to resize, but we don't have multimon\n");
140 return -EINVAL;
141 }
142
143 if (var->xres > par->max_width ||
144 var->yres > par->max_height) {
 145		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
146 return -EINVAL;
147 }
148
149 return 0;
150}
151
152static int vmw_fb_set_par(struct fb_info *info)
153{
154 struct vmw_fb_par *par = info->par;
155 struct vmw_private *vmw_priv = par->vmw_priv;
156
157 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
158 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
159 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
160 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
161 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
162 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
163 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
164 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
166
167 vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
168 vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
169 vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
170 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
171 vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
172 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
173 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
174 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
175
176 /* TODO check if pitch and offset changes */
177
178 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
183 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
184 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
185 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
186 } else {
187 vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
188 vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
189
190 /* TODO check if pitch and offset changes */
191 }
192
193 return 0;
194}
195
196static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
197 struct fb_info *info)
198{
199 return 0;
200}
201
202static int vmw_fb_blank(int blank, struct fb_info *info)
203{
204 return 0;
205}
206
207/*
208 * Dirty code
209 */
210
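/*
 * Copy the dirty rectangle from the vmalloc'd shadow framebuffer into
 * the VRAM buffer object and emit an SVGA_CMD_UPDATE for it.
 */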
211static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
212{
213 struct vmw_private *vmw_priv = par->vmw_priv;
214 struct fb_info *info = vmw_priv->fb_info;
215 int stride = (info->fix.line_length / 4);
216 int *src = (int *)info->screen_base;
217 __le32 __iomem *vram_mem = par->bo_ptr;
218 unsigned long flags;
219 unsigned x, y, w, h;
220 int i, k;
221 struct {
222 uint32_t header;
223 SVGAFifoCmdUpdate body;
224 } *cmd;
225
226 spin_lock_irqsave(&par->dirty.lock, flags);
227 if (!par->dirty.active) {
228 spin_unlock_irqrestore(&par->dirty.lock, flags);
229 return;
230 }
231 x = par->dirty.x1;
232 y = par->dirty.y1;
233 w = min(par->dirty.x2, info->var.xres) - x;
234 h = min(par->dirty.y2, info->var.yres) - y;
235 par->dirty.x1 = par->dirty.x2 = 0;
236 par->dirty.y1 = par->dirty.y2 = 0;
237 spin_unlock_irqrestore(&par->dirty.lock, flags);
238
 239	for (i = y * stride; i < (y + h) * stride && i < info->fix.smem_len / 4; i += stride) {
240 for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
241 iowrite32(src[k], vram_mem + k);
242 }
243
244#if 0
245 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
246#endif
247
248 cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
249 if (unlikely(cmd == NULL)) {
250 DRM_ERROR("Fifo reserve failed.\n");
251 return;
252 }
253
254 cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
255 cmd->body.x = cpu_to_le32(x);
256 cmd->body.y = cpu_to_le32(y);
257 cmd->body.width = cpu_to_le32(w);
258 cmd->body.height = cpu_to_le32(h);
259 vmw_fifo_commit(vmw_priv, sizeof(*cmd));
260}
261
262static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
263 unsigned x1, unsigned y1,
264 unsigned width, unsigned height)
265{
266 struct fb_info *info = par->vmw_priv->fb_info;
267 unsigned long flags;
268 unsigned x2 = x1 + width;
269 unsigned y2 = y1 + height;
270
271 spin_lock_irqsave(&par->dirty.lock, flags);
272 if (par->dirty.x1 == par->dirty.x2) {
273 par->dirty.x1 = x1;
274 par->dirty.y1 = y1;
275 par->dirty.x2 = x2;
276 par->dirty.y2 = y2;
 277		/* If we are active, start the dirty work;
 278		 * we share the work with the defio system. */
279 if (par->dirty.active)
280 schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
281 } else {
282 if (x1 < par->dirty.x1)
283 par->dirty.x1 = x1;
284 if (y1 < par->dirty.y1)
285 par->dirty.y1 = y1;
286 if (x2 > par->dirty.x2)
287 par->dirty.x2 = x2;
288 if (y2 > par->dirty.y2)
289 par->dirty.y2 = y2;
290 }
291 spin_unlock_irqrestore(&par->dirty.lock, flags);
292}
293
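/*
 * Deferred I/O callback: collapse the list of written pages into a
 * full-width span of dirty lines and flush it immediately.
 */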
294static void vmw_deferred_io(struct fb_info *info,
295 struct list_head *pagelist)
296{
297 struct vmw_fb_par *par = info->par;
298 unsigned long start, end, min, max;
299 unsigned long flags;
300 struct page *page;
301 int y1, y2;
302
303 min = ULONG_MAX;
304 max = 0;
305 list_for_each_entry(page, pagelist, lru) {
306 start = page->index << PAGE_SHIFT;
307 end = start + PAGE_SIZE - 1;
308 min = min(min, start);
309 max = max(max, end);
310 }
311
312 if (min < max) {
313 y1 = min / info->fix.line_length;
314 y2 = (max / info->fix.line_length) + 1;
315
316 spin_lock_irqsave(&par->dirty.lock, flags);
317 par->dirty.x1 = 0;
318 par->dirty.y1 = y1;
319 par->dirty.x2 = info->var.xres;
320 par->dirty.y2 = y2;
321 spin_unlock_irqrestore(&par->dirty.lock, flags);
322 }
323
324 vmw_fb_dirty_flush(par);
 325}
326
327struct fb_deferred_io vmw_defio = {
328 .delay = VMW_DIRTY_DELAY,
329 .deferred_io = vmw_deferred_io,
330};
331
332/*
333 * Draw code
334 */
335
336static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
337{
338 cfb_fillrect(info, rect);
339 vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
340 rect->width, rect->height);
341}
342
343static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
344{
345 cfb_copyarea(info, region);
346 vmw_fb_dirty_mark(info->par, region->dx, region->dy,
347 region->width, region->height);
348}
349
350static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
351{
352 cfb_imageblit(info, image);
353 vmw_fb_dirty_mark(info->par, image->dx, image->dy,
354 image->width, image->height);
355}
356
357/*
358 * Bring up code
359 */
360
361static struct fb_ops vmw_fb_ops = {
362 .owner = THIS_MODULE,
363 .fb_check_var = vmw_fb_check_var,
364 .fb_set_par = vmw_fb_set_par,
365 .fb_setcolreg = vmw_fb_setcolreg,
366 .fb_fillrect = vmw_fb_fillrect,
367 .fb_copyarea = vmw_fb_copyarea,
368 .fb_imageblit = vmw_fb_imageblit,
369 .fb_pan_display = vmw_fb_pan_display,
370 .fb_blank = vmw_fb_blank,
371};
372
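/*
 * Allocate a buffer object pinned within the first pages of VRAM to
 * back the fbdev framebuffer.
 */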
373static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out)
375{
376 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_placement ne_placement = vmw_vram_ne_placement;
378 int ret;
379
380 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
381
 382	/* interruptible? */
383 ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
384 if (unlikely(ret != 0))
385 return ret;
386
387 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
 388	if (!vmw_bo) {
		ret = -ENOMEM;
 389		goto err_unlock;
	}
390
391 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
392 &ne_placement,
393 false,
394 &vmw_dmabuf_bo_free);
395 if (unlikely(ret != 0))
396 goto err_unlock; /* init frees the buffer on failure */
397
398 *out = vmw_bo;
399
400 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
401
402 return 0;
403
404err_unlock:
405 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
406 return ret;
407}
408
409int vmw_fb_init(struct vmw_private *vmw_priv)
410{
411 struct device *device = &vmw_priv->dev->pdev->dev;
412 struct vmw_fb_par *par;
413 struct fb_info *info;
414 unsigned initial_width, initial_height;
415 unsigned fb_width, fb_height;
 416	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
417 int ret;
418
419 initial_width = 800;
420 initial_height = 600;
421
 422	fb_bpp = 32;
423 fb_depth = 24;
424
425 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
426 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
427 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
428 } else {
429 fb_width = min(vmw_priv->fb_max_width, initial_width);
430 fb_height = min(vmw_priv->fb_max_height, initial_height);
431 }
432
433 initial_width = min(fb_width, initial_width);
434 initial_height = min(fb_height, initial_height);
435
436 vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
437 vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
 438	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
439 vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
440 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
441 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
442 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
443
444 fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
445 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
446 fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
447
448 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
449 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
450 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
451 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
452 DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
453 DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
454 DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
455 DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
456 DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
457 DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
458 DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
459 DRM_DEBUG("fb_pitch %u\n", fb_pitch);
460 DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
461
462 info = framebuffer_alloc(sizeof(*par), device);
463 if (!info)
464 return -ENOMEM;
465
466 /*
467 * Par
468 */
469 vmw_priv->fb_info = info;
470 par = info->par;
471 par->vmw_priv = vmw_priv;
472 par->depth = fb_depth;
 473	par->bpp = fb_bpp;
474 par->vmalloc = NULL;
475 par->max_width = fb_width;
476 par->max_height = fb_height;
477
478 /*
479 * Create buffers and alloc memory
480 */
481 par->vmalloc = vmalloc(fb_size);
482 if (unlikely(par->vmalloc == NULL)) {
483 ret = -ENOMEM;
484 goto err_free;
485 }
486
487 ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
488 if (unlikely(ret != 0))
489 goto err_free;
490
491 ret = ttm_bo_kmap(&par->vmw_bo->base,
492 0,
493 par->vmw_bo->base.num_pages,
494 &par->map);
495 if (unlikely(ret != 0))
496 goto err_unref;
497 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
498 par->bo_size = fb_size;
499
500 /*
501 * Fixed and var
502 */
503 strcpy(info->fix.id, "svgadrmfb");
504 info->fix.type = FB_TYPE_PACKED_PIXELS;
505 info->fix.visual = FB_VISUAL_TRUECOLOR;
506 info->fix.type_aux = 0;
507 info->fix.xpanstep = 1; /* doing it in hw */
508 info->fix.ypanstep = 1; /* doing it in hw */
509 info->fix.ywrapstep = 0;
510 info->fix.accel = FB_ACCEL_NONE;
511 info->fix.line_length = fb_pitch;
512
513 info->fix.smem_start = 0;
514 info->fix.smem_len = fb_size;
515
516 info->fix.mmio_start = 0;
517 info->fix.mmio_len = 0;
518
519 info->pseudo_palette = par->pseudo_palette;
520 info->screen_base = par->vmalloc;
521 info->screen_size = fb_size;
522
523 info->flags = FBINFO_DEFAULT;
524 info->fbops = &vmw_fb_ops;
525
526 /* 24 depth per default */
527 info->var.red.offset = 16;
528 info->var.green.offset = 8;
529 info->var.blue.offset = 0;
530 info->var.red.length = 8;
531 info->var.green.length = 8;
532 info->var.blue.length = 8;
533 info->var.transp.offset = 0;
534 info->var.transp.length = 0;
535
536 info->var.xres_virtual = fb_width;
537 info->var.yres_virtual = fb_height;
538 info->var.bits_per_pixel = par->bpp;
539 info->var.xoffset = 0;
540 info->var.yoffset = 0;
541 info->var.activate = FB_ACTIVATE_NOW;
542 info->var.height = -1;
543 info->var.width = -1;
544
545 info->var.xres = initial_width;
546 info->var.yres = initial_height;
547
548#if 0
549 info->pixmap.size = 64*1024;
550 info->pixmap.buf_align = 8;
551 info->pixmap.access_align = 32;
552 info->pixmap.flags = FB_PIXMAP_SYSTEM;
553 info->pixmap.scan_align = 1;
554#else
555 info->pixmap.size = 0;
556 info->pixmap.buf_align = 8;
557 info->pixmap.access_align = 32;
558 info->pixmap.flags = FB_PIXMAP_SYSTEM;
559 info->pixmap.scan_align = 1;
560#endif
561
562 info->aperture_base = vmw_priv->vram_start;
563 info->aperture_size = vmw_priv->vram_size;
564
565 /*
566 * Dirty & Deferred IO
567 */
568 par->dirty.x1 = par->dirty.x2 = 0;
 569	par->dirty.y1 = par->dirty.y2 = 0;
570 par->dirty.active = true;
571 spin_lock_init(&par->dirty.lock);
572 info->fbdefio = &vmw_defio;
573 fb_deferred_io_init(info);
574
575 ret = register_framebuffer(info);
576 if (unlikely(ret != 0))
577 goto err_defio;
578
579 return 0;
580
581err_defio:
582 fb_deferred_io_cleanup(info);
583 ttm_bo_kunmap(&par->map);
584err_unref:
585 ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
586err_free:
587 vfree(par->vmalloc);
588 framebuffer_release(info);
589 vmw_priv->fb_info = NULL;
590
591 return ret;
592}
593
594int vmw_fb_close(struct vmw_private *vmw_priv)
595{
596 struct fb_info *info;
597 struct vmw_fb_par *par;
598 struct ttm_buffer_object *bo;
599
600 if (!vmw_priv->fb_info)
601 return 0;
602
603 info = vmw_priv->fb_info;
604 par = info->par;
605 bo = &par->vmw_bo->base;
606 par->vmw_bo = NULL;
607
 608	/* FIXME: is this the right teardown order? */
609 fb_deferred_io_cleanup(info);
610 unregister_framebuffer(info);
611
612 ttm_bo_kunmap(&par->map);
613 ttm_bo_unref(&bo);
614
615 vfree(par->vmalloc);
616 framebuffer_release(info);
617
618 return 0;
619}
620
621int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
622 struct vmw_dma_buffer *vmw_bo)
623{
624 struct ttm_buffer_object *bo = &vmw_bo->base;
625 int ret = 0;
626
627 ret = ttm_bo_reserve(bo, false, false, false, 0);
628 if (unlikely(ret != 0))
629 return ret;
630
631 ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
632 ttm_bo_unreserve(bo);
633
634 return ret;
635}
636
637int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
638 struct vmw_dma_buffer *vmw_bo)
639{
640 struct ttm_buffer_object *bo = &vmw_bo->base;
641 struct ttm_placement ne_placement = vmw_vram_ne_placement;
642 int ret = 0;
643
644 ne_placement.lpfn = bo->num_pages;
645
 646	/* interruptible? */
647 ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
648 if (unlikely(ret != 0))
649 return ret;
650
651 ret = ttm_bo_reserve(bo, false, false, false, 0);
652 if (unlikely(ret != 0))
653 goto err_unlock;
654
655 ret = ttm_bo_validate(bo, &ne_placement, false, false);
656 ttm_bo_unreserve(bo);
657err_unlock:
658 ttm_write_unlock(&vmw_priv->active_master->lock);
659
660 return ret;
661}
662
663int vmw_fb_off(struct vmw_private *vmw_priv)
664{
665 struct fb_info *info;
666 struct vmw_fb_par *par;
667 unsigned long flags;
668
669 if (!vmw_priv->fb_info)
670 return -EINVAL;
671
672 info = vmw_priv->fb_info;
673 par = info->par;
674
675 spin_lock_irqsave(&par->dirty.lock, flags);
676 par->dirty.active = false;
677 spin_unlock_irqrestore(&par->dirty.lock, flags);
678
679 flush_scheduled_work();
680
681 par->bo_ptr = NULL;
682 ttm_bo_kunmap(&par->map);
683
684 vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
685
686 return 0;
687}
688
689int vmw_fb_on(struct vmw_private *vmw_priv)
690{
691 struct fb_info *info;
692 struct vmw_fb_par *par;
693 unsigned long flags;
694 bool dummy;
695 int ret;
696
697 if (!vmw_priv->fb_info)
698 return -EINVAL;
699
700 info = vmw_priv->fb_info;
701 par = info->par;
702
703 /* we are already active */
704 if (par->bo_ptr != NULL)
705 return 0;
706
 707	/* Make sure that all overlays are stopped when we take over */
708 vmw_overlay_stop_all(vmw_priv);
709
710 ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
711 if (unlikely(ret != 0)) {
712 DRM_ERROR("could not move buffer to start of VRAM\n");
713 goto err_no_buffer;
714 }
715
716 ret = ttm_bo_kmap(&par->vmw_bo->base,
717 0,
718 par->vmw_bo->base.num_pages,
719 &par->map);
720 BUG_ON(ret != 0);
721 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
722
723 spin_lock_irqsave(&par->dirty.lock, flags);
724 par->dirty.active = true;
725 spin_unlock_irqrestore(&par->dirty.lock, flags);
726
727err_no_buffer:
728 vmw_fb_set_par(info);
729
730 vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
731
 732	/* If there was already something dirty we won't
 733	 * schedule new work, so let's do it now. */
734 schedule_delayed_work(&info->deferred_work, 0);
735
736 return 0;
737}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 000000000000..39d43a01d846
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,538 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_placement.h"
31
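/*
 * 3D is available only if the fifo exports SVGA_FIFO_3D_HWVERSION and
 * reports at least SVGA3D_HWVERSION_WS65_B1.
 */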
32bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
33{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion;
36
37 fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
38 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
39 return false;
40
41 hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
42 if (hwversion == 0)
43 return false;
44
45 if (hwversion < SVGA3D_HWVERSION_WS65_B1)
46 return false;
47
48 return true;
49}
50
51int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
52{
53 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
54 uint32_t max;
55 uint32_t min;
56 uint32_t dummy;
57 int ret;
58
59 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
60 fifo->static_buffer = vmalloc(fifo->static_buffer_size);
61 if (unlikely(fifo->static_buffer == NULL))
62 return -ENOMEM;
63
64 fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
65 fifo->last_data_size = 0;
66 fifo->last_buffer_add = false;
67 fifo->last_buffer = vmalloc(fifo->last_buffer_size);
68 if (unlikely(fifo->last_buffer == NULL)) {
69 ret = -ENOMEM;
70 goto out_err;
71 }
72
73 fifo->dynamic_buffer = NULL;
74 fifo->reserved_size = 0;
75 fifo->using_bounce_buffer = false;
76
77 mutex_init(&fifo->fifo_mutex);
78 init_rwsem(&fifo->rwsem);
79
80 /*
81 * Allow mapping the first page read-only to user-space.
82 */
83
84 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
85 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
86 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
87
88 mutex_lock(&dev_priv->hw_mutex);
89 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
90 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
91 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
92
93 min = 4;
94 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
95 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
96 min <<= 2;
97
98 if (min < PAGE_SIZE)
99 min = PAGE_SIZE;
100
101 iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
102 iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
103 wmb();
104 iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
105 iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
106 iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
107 mb();
108
109 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
110 mutex_unlock(&dev_priv->hw_mutex);
111
112 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
113 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
114 fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
115
116 DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
117 (unsigned int) max,
118 (unsigned int) min,
119 (unsigned int) fifo->capabilities);
120
121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
123
124 return vmw_fifo_send_fence(dev_priv, &dummy);
125out_err:
126 vfree(fifo->static_buffer);
127 fifo->static_buffer = NULL;
128 return ret;
129}
130
131void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
132{
133 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
134
135 mutex_lock(&dev_priv->hw_mutex);
136
137 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
138 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
139 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
140 }
141
142 mutex_unlock(&dev_priv->hw_mutex);
143}
144
145void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
146{
147 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
148
149 mutex_lock(&dev_priv->hw_mutex);
150
151 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
152 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
153
154 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
155
156 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
157 dev_priv->config_done_state);
158 vmw_write(dev_priv, SVGA_REG_ENABLE,
159 dev_priv->enable_state);
160
161 mutex_unlock(&dev_priv->hw_mutex);
162
163 if (likely(fifo->last_buffer != NULL)) {
164 vfree(fifo->last_buffer);
165 fifo->last_buffer = NULL;
166 }
167
168 if (likely(fifo->static_buffer != NULL)) {
169 vfree(fifo->static_buffer);
170 fifo->static_buffer = NULL;
171 }
172
173 if (likely(fifo->dynamic_buffer != NULL)) {
174 vfree(fifo->dynamic_buffer);
175 fifo->dynamic_buffer = NULL;
176 }
177}
178
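/*
 * next_cmd is the host write pointer and stop the device read pointer
 * into the command ring [min, max); the fifo counts as full when there
 * is no room for another @bytes bytes.
 */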
179static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
180{
181 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
182 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
183 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
184 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
185 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
186
187 return ((max - next_cmd) + (stop - min) <= bytes);
188}
189
190static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
191 uint32_t bytes, bool interruptible,
192 unsigned long timeout)
193{
194 int ret = 0;
195 unsigned long end_jiffies = jiffies + timeout;
196 DEFINE_WAIT(__wait);
197
198 DRM_INFO("Fifo wait noirq.\n");
199
200 for (;;) {
201 prepare_to_wait(&dev_priv->fifo_queue, &__wait,
202 (interruptible) ?
203 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
204 if (!vmw_fifo_is_full(dev_priv, bytes))
205 break;
206 if (time_after_eq(jiffies, end_jiffies)) {
207 ret = -EBUSY;
208 DRM_ERROR("SVGA device lockup.\n");
209 break;
210 }
211 schedule_timeout(1);
212 if (interruptible && signal_pending(current)) {
213 ret = -ERESTARTSYS;
214 break;
215 }
216 }
217 finish_wait(&dev_priv->fifo_queue, &__wait);
218 wake_up_all(&dev_priv->fifo_queue);
219 DRM_INFO("Fifo noirq exit.\n");
220 return ret;
221}
222
223static int vmw_fifo_wait(struct vmw_private *dev_priv,
224 uint32_t bytes, bool interruptible,
225 unsigned long timeout)
226{
227 long ret = 1L;
228 unsigned long irq_flags;
229
230 if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
231 return 0;
232
233 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
234 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
235 return vmw_fifo_wait_noirq(dev_priv, bytes,
236 interruptible, timeout);
237
238 mutex_lock(&dev_priv->hw_mutex);
239 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
240 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
241 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
242 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
243 vmw_write(dev_priv, SVGA_REG_IRQMASK,
244 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
245 SVGA_IRQFLAG_FIFO_PROGRESS);
246 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
247 }
248 mutex_unlock(&dev_priv->hw_mutex);
249
250 if (interruptible)
251 ret = wait_event_interruptible_timeout
252 (dev_priv->fifo_queue,
253 !vmw_fifo_is_full(dev_priv, bytes), timeout);
254 else
255 ret = wait_event_timeout
256 (dev_priv->fifo_queue,
257 !vmw_fifo_is_full(dev_priv, bytes), timeout);
258
259 if (unlikely(ret == 0))
260 ret = -EBUSY;
261 else if (likely(ret > 0))
262 ret = 0;
263
264 mutex_lock(&dev_priv->hw_mutex);
265 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
266 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
267 vmw_write(dev_priv, SVGA_REG_IRQMASK,
268 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
269 ~SVGA_IRQFLAG_FIFO_PROGRESS);
270 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
271 }
272 mutex_unlock(&dev_priv->hw_mutex);
273
274 return ret;
275}
276
277void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
278{
279 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
280 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
281 uint32_t max;
282 uint32_t min;
283 uint32_t next_cmd;
284 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
285 int ret;
286
287 mutex_lock(&fifo_state->fifo_mutex);
288 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
289 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
290 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
291
292 if (unlikely(bytes >= (max - min)))
293 goto out_err;
294
295 BUG_ON(fifo_state->reserved_size != 0);
296 BUG_ON(fifo_state->dynamic_buffer != NULL);
297
298 fifo_state->reserved_size = bytes;
299
300 while (1) {
301 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
302 bool need_bounce = false;
303 bool reserve_in_place = false;
304
305 if (next_cmd >= stop) {
306 if (likely((next_cmd + bytes < max ||
307 (next_cmd + bytes == max && stop > min))))
308 reserve_in_place = true;
309
310 else if (vmw_fifo_is_full(dev_priv, bytes)) {
311 ret = vmw_fifo_wait(dev_priv, bytes,
312 false, 3 * HZ);
313 if (unlikely(ret != 0))
314 goto out_err;
315 } else
316 need_bounce = true;
317
318 } else {
319
320 if (likely((next_cmd + bytes < stop)))
321 reserve_in_place = true;
322 else {
323 ret = vmw_fifo_wait(dev_priv, bytes,
324 false, 3 * HZ);
325 if (unlikely(ret != 0))
326 goto out_err;
327 }
328 }
329
330 if (reserve_in_place) {
331 if (reserveable || bytes <= sizeof(uint32_t)) {
332 fifo_state->using_bounce_buffer = false;
333
334 if (reserveable)
335 iowrite32(bytes, fifo_mem +
336 SVGA_FIFO_RESERVED);
337 return fifo_mem + (next_cmd >> 2);
338 } else {
339 need_bounce = true;
340 }
341 }
342
343 if (need_bounce) {
344 fifo_state->using_bounce_buffer = true;
345 if (bytes < fifo_state->static_buffer_size)
346 return fifo_state->static_buffer;
 347			else {
 348				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (unlikely(fifo_state->dynamic_buffer == NULL))
					goto out_err;
 349				return fifo_state->dynamic_buffer;
 350			}
351 }
352 }
353out_err:
354 fifo_state->reserved_size = 0;
355 mutex_unlock(&fifo_state->fifo_mutex);
356 return NULL;
357}
358
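/*
 * Flush a bounce buffer using the fifo reservation mechanism: publish
 * the size in SVGA_FIFO_RESERVED, then copy in at most two chunks to
 * handle the ring wrap-around.
 */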
359static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
360 __le32 __iomem *fifo_mem,
361 uint32_t next_cmd,
362 uint32_t max, uint32_t min, uint32_t bytes)
363{
364 uint32_t chunk_size = max - next_cmd;
365 uint32_t rest;
366 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
367 fifo_state->dynamic_buffer : fifo_state->static_buffer;
368
369 if (bytes < chunk_size)
370 chunk_size = bytes;
371
372 iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
373 mb();
374 memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
375 rest = bytes - chunk_size;
376 if (rest)
377 memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
378 rest);
379}
380
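/*
 * Flush a bounce buffer on devices without SVGA_FIFO_CAP_RESERVE: copy
 * one 32-bit word at a time, advancing SVGA_FIFO_NEXT_CMD after each
 * word so the device always sees a consistent stream.
 */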
381static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
382 __le32 __iomem *fifo_mem,
383 uint32_t next_cmd,
384 uint32_t max, uint32_t min, uint32_t bytes)
385{
386 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
387 fifo_state->dynamic_buffer : fifo_state->static_buffer;
388
389 while (bytes > 0) {
390 iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
391 next_cmd += sizeof(uint32_t);
392 if (unlikely(next_cmd == max))
393 next_cmd = min;
394 mb();
395 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
396 mb();
397 bytes -= sizeof(uint32_t);
398 }
399}
400
401void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
402{
403 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
404 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
405 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
406 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
407 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
408 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
409
410 BUG_ON((bytes & 3) != 0);
411 BUG_ON(bytes > fifo_state->reserved_size);
412
413 fifo_state->reserved_size = 0;
414
415 if (fifo_state->using_bounce_buffer) {
416 if (reserveable)
417 vmw_fifo_res_copy(fifo_state, fifo_mem,
418 next_cmd, max, min, bytes);
419 else
420 vmw_fifo_slow_copy(fifo_state, fifo_mem,
421 next_cmd, max, min, bytes);
422
423 if (fifo_state->dynamic_buffer) {
424 vfree(fifo_state->dynamic_buffer);
425 fifo_state->dynamic_buffer = NULL;
426 }
427
428 }
429
430 down_write(&fifo_state->rwsem);
431 if (fifo_state->using_bounce_buffer || reserveable) {
432 next_cmd += bytes;
433 if (next_cmd >= max)
434 next_cmd -= max - min;
435 mb();
436 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
437 }
438
439 if (reserveable)
440 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
441 mb();
442 up_write(&fifo_state->rwsem);
443 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
444 mutex_unlock(&fifo_state->fifo_mutex);
445}
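/*
 * Typical reserve/commit usage, as in vmw_fb_dirty_flush() above (an
 * illustrative sketch only; the command layout shown is the generic
 * header-plus-body shape, not a definition from this file):
 *
 *	struct {
 *		uint32_t header;
 *		SVGAFifoCmdUpdate body;
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return;			(reservation failed)
 *	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
 *	... fill in cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */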
446
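/*
 * Insert an SVGA_CMD_FENCE and return the sequence it will signal.
 * Without SVGA_FIFO_CAP_FENCE the command is dropped and the irq code
 * emulates fence completion instead.
 */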
447int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
448{
449 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
450 struct svga_fifo_cmd_fence *cmd_fence;
451 void *fm;
452 int ret = 0;
453 uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
454
455 fm = vmw_fifo_reserve(dev_priv, bytes);
456 if (unlikely(fm == NULL)) {
457 *sequence = atomic_read(&dev_priv->fence_seq);
458 ret = -ENOMEM;
459 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
460 false, 3*HZ);
461 goto out_err;
462 }
463
464 do {
465 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
466 } while (*sequence == 0);
467
468 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
469
470 /*
471 * Don't request hardware to send a fence. The
472 * waiting code in vmwgfx_irq.c will emulate this.
473 */
474
475 vmw_fifo_commit(dev_priv, 0);
476 return 0;
477 }
478
479 *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
480 cmd_fence = (struct svga_fifo_cmd_fence *)
481 ((unsigned long)fm + sizeof(__le32));
482
483 iowrite32(*sequence, &cmd_fence->fence);
484 fifo_state->last_buffer_add = true;
485 vmw_fifo_commit(dev_priv, bytes);
486 fifo_state->last_buffer_add = false;
487
488out_err:
489 return ret;
490}
491
492/**
493 * Map the first page of the FIFO read-only to user-space.
494 */
495
496static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
497{
498 int ret;
499 unsigned long address = (unsigned long)vmf->virtual_address;
500
501 if (address != vma->vm_start)
502 return VM_FAULT_SIGBUS;
503
504 ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
505 if (likely(ret == -EBUSY || ret == 0))
506 return VM_FAULT_NOPAGE;
507 else if (ret == -ENOMEM)
508 return VM_FAULT_OOM;
509
510 return VM_FAULT_SIGBUS;
511}
512
513static struct vm_operations_struct vmw_fifo_vm_ops = {
514 .fault = vmw_fifo_vm_fault,
515 .open = NULL,
516 .close = NULL
517};
518
519int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
520{
521 struct drm_file *file_priv;
522 struct vmw_private *dev_priv;
523
524 file_priv = (struct drm_file *)filp->private_data;
525 dev_priv = vmw_priv(file_priv->minor->dev);
526
527 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
528 (vma->vm_end - vma->vm_start) != PAGE_SIZE)
529 return -EINVAL;
530
531 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
532 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
533 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
534 vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
535 vma->vm_page_prot);
536 vma->vm_ops = &vmw_fifo_vm_ops;
537 return 0;
538}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 000000000000..5f8908a5d7fd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_bo_driver.h"
31
32/**
33 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
34 * the number of used descriptors.
35 */
36
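/*
 * Build the chain of descriptor pages: runs of physically contiguous
 * pages collapse into a single descriptor, and a descriptor with
 * num_pages == 0 either terminates the list (ppn == 0) or points to
 * the next descriptor page.
 */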
37static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
38 struct page *pages[],
39 unsigned long num_pages)
40{
41 struct page *page, *next;
42 struct svga_guest_mem_descriptor *page_virtual = NULL;
43 struct svga_guest_mem_descriptor *desc_virtual = NULL;
44 unsigned int desc_per_page;
45 unsigned long prev_pfn;
46 unsigned long pfn;
47 int ret;
48
49 desc_per_page = PAGE_SIZE /
50 sizeof(struct svga_guest_mem_descriptor) - 1;
51
52 while (likely(num_pages != 0)) {
53 page = alloc_page(__GFP_HIGHMEM);
54 if (unlikely(page == NULL)) {
55 ret = -ENOMEM;
56 goto out_err;
57 }
58
59 list_add_tail(&page->lru, desc_pages);
60
61 /*
62 * Point previous page terminating descriptor to this
63 * page before unmapping it.
64 */
65
66 if (likely(page_virtual != NULL)) {
67 desc_virtual->ppn = page_to_pfn(page);
68 kunmap_atomic(page_virtual, KM_USER0);
69 }
70
71 page_virtual = kmap_atomic(page, KM_USER0);
72 desc_virtual = page_virtual - 1;
73 prev_pfn = ~(0UL);
74
75 while (likely(num_pages != 0)) {
76 pfn = page_to_pfn(*pages);
77
78 if (pfn != prev_pfn + 1) {
79
80 if (desc_virtual - page_virtual ==
81 desc_per_page - 1)
82 break;
83
84 (++desc_virtual)->ppn = cpu_to_le32(pfn);
85 desc_virtual->num_pages = cpu_to_le32(1);
86 } else {
87 uint32_t tmp =
88 le32_to_cpu(desc_virtual->num_pages);
89 desc_virtual->num_pages = cpu_to_le32(tmp + 1);
90 }
91 prev_pfn = pfn;
92 --num_pages;
93 ++pages;
94 }
95
96 (++desc_virtual)->ppn = cpu_to_le32(0);
97 desc_virtual->num_pages = cpu_to_le32(0);
98 }
99
100 if (likely(page_virtual != NULL))
101 kunmap_atomic(page_virtual, KM_USER0);
102
103 return 0;
104out_err:
105 list_for_each_entry_safe(page, next, desc_pages, lru) {
106 list_del_init(&page->lru);
107 __free_page(page);
108 }
109 return ret;
110}
111
112static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
113{
114 struct page *page, *next;
115
116 list_for_each_entry_safe(page, next, desc_pages, lru) {
117 list_del_init(&page->lru);
118 __free_page(page);
119 }
120}
121
122static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
123 int gmr_id, struct list_head *desc_pages)
124{
125 struct page *page;
126
127 if (unlikely(list_empty(desc_pages)))
128 return;
129
130 page = list_entry(desc_pages->next, struct page, lru);
131
132 mutex_lock(&dev_priv->hw_mutex);
133
134 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
135 wmb();
136 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
137 mb();
138
139 mutex_unlock(&dev_priv->hw_mutex);
140
141}
142
143/**
144 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
145 * the number of used descriptors.
146 */
147
148static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
149 unsigned long num_pages)
150{
151 unsigned long prev_pfn = ~(0UL);
152 unsigned long pfn;
153 unsigned long descriptors = 0;
154
155 while (num_pages--) {
156 pfn = page_to_pfn(*pages++);
157 if (prev_pfn + 1 != pfn)
158 ++descriptors;
159 prev_pfn = pfn;
160 }
161
162 return descriptors;
163}
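
Both descriptor walkers in this file rely on the same run-length coalescing: contiguous page frame numbers collapse into a single descriptor, so the descriptor count equals the number of discontiguous runs (plus the per-page chaining entries the builder adds). A minimal standalone sketch of the counting rule, in userspace C with invented pfn values rather than driver code:

	#include <stdio.h>

	/* Count how many descriptors a pfn array needs: one per run of
	 * consecutive page frame numbers. */
	static unsigned long count_runs(const unsigned long *pfn, unsigned long n)
	{
		unsigned long prev = ~0UL, runs = 0, i;

		for (i = 0; i < n; i++) {
			if (pfn[i] != prev + 1)		/* start of a new run */
				++runs;
			prev = pfn[i];
		}
		return runs;
	}

	int main(void)
	{
		/* three runs: {100,101,102}, {200}, {300,301} */
		unsigned long pfns[] = { 100, 101, 102, 200, 300, 301 };

		printf("%lu descriptors\n", count_runs(pfns, 6));	/* prints 3 */
		return 0;
	}
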
164
165int vmw_gmr_bind(struct vmw_private *dev_priv,
166 struct ttm_buffer_object *bo)
167{
168 struct ttm_tt *ttm = bo->ttm;
169 unsigned long descriptors;
170 int ret;
171 uint32_t id;
172 struct list_head desc_pages;
173
174 if (!(dev_priv->capabilities & SVGA_CAP_GMR))
175 return -EINVAL;
176
177 ret = ttm_tt_populate(ttm);
178 if (unlikely(ret != 0))
179 return ret;
180
181 descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
182 if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
183 return -EINVAL;
184
185 INIT_LIST_HEAD(&desc_pages);
186 ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
187 ttm->num_pages);
188 if (unlikely(ret != 0))
189 return ret;
190
191 ret = vmw_gmr_id_alloc(dev_priv, &id);
192 if (unlikely(ret != 0))
193 goto out_no_id;
194
195 vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
196 vmw_gmr_free_descriptors(&desc_pages);
197 vmw_dmabuf_set_gmr(bo, id);
198 return 0;
199
200out_no_id:
201 vmw_gmr_free_descriptors(&desc_pages);
202 return ret;
203}
204
205void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
206{
207 mutex_lock(&dev_priv->hw_mutex);
208 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
209 wmb();
210 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
211 mb();
212 mutex_unlock(&dev_priv->hw_mutex);
213}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 000000000000..1c7a316454d8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,87 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30
31int vmw_getparam_ioctl(struct drm_device *dev, void *data,
32 struct drm_file *file_priv)
33{
34 struct vmw_private *dev_priv = vmw_priv(dev);
35 struct drm_vmw_getparam_arg *param =
36 (struct drm_vmw_getparam_arg *)data;
37
38 switch (param->param) {
39 case DRM_VMW_PARAM_NUM_STREAMS:
40 param->value = vmw_overlay_num_overlays(dev_priv);
41 break;
42 case DRM_VMW_PARAM_NUM_FREE_STREAMS:
43 param->value = vmw_overlay_num_free_overlays(dev_priv);
44 break;
45 case DRM_VMW_PARAM_3D:
46 param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
47 break;
48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start;
50 break;
51 case DRM_VMW_PARAM_HW_CAPS:
52 param->value = dev_priv->capabilities;
53 break;
54 case DRM_VMW_PARAM_FIFO_CAPS:
55 param->value = dev_priv->fifo.capabilities;
56 break;
57 default:
58 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
59 param->param);
60 return -EINVAL;
61 }
62
63 return 0;
64}
65
66int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
67 struct drm_file *file_priv)
68{
69 struct vmw_private *dev_priv = vmw_priv(dev);
70 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
71 struct drm_vmw_fifo_debug_arg *arg =
72 (struct drm_vmw_fifo_debug_arg *)data;
73 __le32 __user *buffer = (__le32 __user *)
74 (unsigned long)arg->debug_buffer;
75
76 if (unlikely(fifo_state->last_buffer == NULL))
77 return -EINVAL;
78
79 if (arg->debug_buffer_size < fifo_state->last_data_size) {
80 arg->used_size = arg->debug_buffer_size;
81 arg->did_not_fit = 1;
82 } else {
83 arg->used_size = fifo_state->last_data_size;
84 arg->did_not_fit = 0;
85 }
 86	return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size) ? -EFAULT : 0;
87}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 000000000000..4d7cb5393860
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,286 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31#define VMW_FENCE_WRAP (1 << 24)
32
33irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
34{
35 struct drm_device *dev = (struct drm_device *)arg;
36 struct vmw_private *dev_priv = vmw_priv(dev);
37 uint32_t status;
38
39 spin_lock(&dev_priv->irq_lock);
40 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
41 spin_unlock(&dev_priv->irq_lock);
42
43 if (status & SVGA_IRQFLAG_ANY_FENCE)
44 wake_up_all(&dev_priv->fence_queue);
45 if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
46 wake_up_all(&dev_priv->fifo_queue);
47
48 if (likely(status)) {
49 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
50 return IRQ_HANDLED;
51 }
52
53 return IRQ_NONE;
54}
55
56static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
57{
58 uint32_t busy;
59
60 mutex_lock(&dev_priv->hw_mutex);
61 busy = vmw_read(dev_priv, SVGA_REG_BUSY);
62 mutex_unlock(&dev_priv->hw_mutex);
63
64 return (busy == 0);
65}
66
67
68bool vmw_fence_signaled(struct vmw_private *dev_priv,
69 uint32_t sequence)
70{
71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
72 struct vmw_fifo_state *fifo_state;
73 bool ret;
74
75 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
76 return true;
77
78 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
79 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
80 return true;
81
82 fifo_state = &dev_priv->fifo;
83 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
84 vmw_fifo_idle(dev_priv, sequence))
85 return true;
86
 87	/**
 88	 * Finally, check whether the sequence is higher than what we've
 89	 * actually emitted; if so, the fence is stale and signaled.
90 */
91
92 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
93 > VMW_FENCE_WRAP);
94
95 return ret;
96}
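
The early-out checks above lean on unsigned wrap-around arithmetic: a fence counts as signaled when its sequence lies at most VMW_FENCE_WRAP (1 << 24) behind the last value read from the FIFO, and the subtraction keeps working after the 32-bit counter wraps past zero. A standalone sketch of the comparison, in userspace C with invented sequence values:

	#include <stdio.h>
	#include <stdint.h>

	#define VMW_FENCE_WRAP (1 << 24)

	/* True when 'seq' has been passed by 'last_read', modulo 2^32. */
	static int seq_signaled(uint32_t last_read, uint32_t seq)
	{
		return (uint32_t)(last_read - seq) < VMW_FENCE_WRAP;
	}

	int main(void)
	{
		printf("%d\n", seq_signaled(100, 90));		/* 1: plainly passed */
		printf("%d\n", seq_signaled(5, 0xfffffff0u));	/* 1: passed across the wrap */
		printf("%d\n", seq_signaled(90, 100));		/* 0: not passed yet */
		return 0;
	}
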
97
98int vmw_fallback_wait(struct vmw_private *dev_priv,
99 bool lazy,
100 bool fifo_idle,
101 uint32_t sequence,
102 bool interruptible,
103 unsigned long timeout)
104{
105 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
106
107 uint32_t count = 0;
108 uint32_t signal_seq;
109 int ret;
110 unsigned long end_jiffies = jiffies + timeout;
111 bool (*wait_condition)(struct vmw_private *, uint32_t);
112 DEFINE_WAIT(__wait);
113
114 wait_condition = (fifo_idle) ? &vmw_fifo_idle :
115 &vmw_fence_signaled;
116
117 /**
118 * Block command submission while waiting for idle.
119 */
120
121 if (fifo_idle)
122 down_read(&fifo_state->rwsem);
123 signal_seq = atomic_read(&dev_priv->fence_seq);
124 ret = 0;
125
126 for (;;) {
127 prepare_to_wait(&dev_priv->fence_queue, &__wait,
128 (interruptible) ?
129 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
130 if (wait_condition(dev_priv, sequence))
131 break;
132 if (time_after_eq(jiffies, end_jiffies)) {
133 DRM_ERROR("SVGA device lockup.\n");
134 break;
135 }
136 if (lazy)
137 schedule_timeout(1);
138 else if ((++count & 0x0F) == 0) {
139 /**
 140			 * FIXME: Use schedule_hrtimeout() here for
 141			 * newer kernels and lower CPU utilization.
142 */
143
144 __set_current_state(TASK_RUNNING);
145 schedule();
146 __set_current_state((interruptible) ?
147 TASK_INTERRUPTIBLE :
148 TASK_UNINTERRUPTIBLE);
149 }
150 if (interruptible && signal_pending(current)) {
151 ret = -ERESTARTSYS;
152 break;
153 }
154 }
155 finish_wait(&dev_priv->fence_queue, &__wait);
156 if (ret == 0 && fifo_idle) {
157 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
158 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
159 }
160 wake_up_all(&dev_priv->fence_queue);
161 if (fifo_idle)
162 up_read(&fifo_state->rwsem);
163
164 return ret;
165}
166
167int vmw_wait_fence(struct vmw_private *dev_priv,
168 bool lazy, uint32_t sequence,
169 bool interruptible, unsigned long timeout)
170{
171 long ret;
172 unsigned long irq_flags;
173 struct vmw_fifo_state *fifo = &dev_priv->fifo;
174
175 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
176 return 0;
177
178 if (likely(vmw_fence_signaled(dev_priv, sequence)))
179 return 0;
180
181 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
182
183 if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
184 return vmw_fallback_wait(dev_priv, lazy, true, sequence,
185 interruptible, timeout);
186
187 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
188 return vmw_fallback_wait(dev_priv, lazy, false, sequence,
189 interruptible, timeout);
190
191 mutex_lock(&dev_priv->hw_mutex);
192 if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
193 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
194 outl(SVGA_IRQFLAG_ANY_FENCE,
195 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
196 vmw_write(dev_priv, SVGA_REG_IRQMASK,
197 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
198 SVGA_IRQFLAG_ANY_FENCE);
199 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
200 }
201 mutex_unlock(&dev_priv->hw_mutex);
202
203 if (interruptible)
204 ret = wait_event_interruptible_timeout
205 (dev_priv->fence_queue,
206 vmw_fence_signaled(dev_priv, sequence),
207 timeout);
208 else
209 ret = wait_event_timeout
210 (dev_priv->fence_queue,
211 vmw_fence_signaled(dev_priv, sequence),
212 timeout);
213
214 if (unlikely(ret == 0))
215 ret = -EBUSY;
216 else if (likely(ret > 0))
217 ret = 0;
218
219 mutex_lock(&dev_priv->hw_mutex);
220 if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
221 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
222 vmw_write(dev_priv, SVGA_REG_IRQMASK,
223 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
224 ~SVGA_IRQFLAG_ANY_FENCE);
225 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
226 }
227 mutex_unlock(&dev_priv->hw_mutex);
228
229 return ret;
230}
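
vmw_wait_fence only keeps the fence interrupt unmasked while someone is waiting: the mask is armed when a waiter arrives and dropped by atomic_dec_and_test() when the last waiter leaves (the add path above re-arms on every waiter, which is redundant but harmless under hw_mutex). A standalone, single-threaded model of that accounting, in userspace C rather than driver code:

	#include <stdio.h>

	static int waiters;
	static int irq_enabled;

	static void waiter_add(void)
	{
		if (++waiters == 1)
			irq_enabled = 1;	/* first waiter unmasks the IRQ */
	}

	static void waiter_remove(void)
	{
		if (--waiters == 0)
			irq_enabled = 0;	/* last waiter masks it again */
	}

	int main(void)
	{
		waiter_add();
		waiter_add();
		waiter_remove();
		printf("enabled=%d waiters=%d\n", irq_enabled, waiters);	/* enabled=1 waiters=1 */
		waiter_remove();
		printf("enabled=%d waiters=%d\n", irq_enabled, waiters);	/* enabled=0 waiters=0 */
		return 0;
	}
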
231
232void vmw_irq_preinstall(struct drm_device *dev)
233{
234 struct vmw_private *dev_priv = vmw_priv(dev);
235 uint32_t status;
236
237 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
238 return;
239
240 spin_lock_init(&dev_priv->irq_lock);
241 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
242 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
243}
244
245int vmw_irq_postinstall(struct drm_device *dev)
246{
247 return 0;
248}
249
250void vmw_irq_uninstall(struct drm_device *dev)
251{
252 struct vmw_private *dev_priv = vmw_priv(dev);
253 uint32_t status;
254
255 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
256 return;
257
258 mutex_lock(&dev_priv->hw_mutex);
259 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
260 mutex_unlock(&dev_priv->hw_mutex);
261
262 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
263 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
264}
265
 266#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
267
268int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
269 struct drm_file *file_priv)
270{
271 struct drm_vmw_fence_wait_arg *arg =
272 (struct drm_vmw_fence_wait_arg *)data;
273 unsigned long timeout;
274
275 if (!arg->cookie_valid) {
276 arg->cookie_valid = 1;
277 arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
278 }
279
280 timeout = jiffies;
281 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
282 return -EBUSY;
283
284 timeout = (unsigned long)arg->kernel_cookie - timeout;
285 return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
286}
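
The cookie above makes the wait restartable without stretching it: the first call stores an absolute deadline in kernel_cookie, so when the ioctl is restarted after a signal it waits only for whatever time remains. A standalone model of the arithmetic, in userspace C where 'now' and TIMEOUT stand in for jiffies and 3 * HZ:

	#include <stdio.h>

	#define TIMEOUT 300	/* e.g. 3 * HZ with HZ == 100 */

	/* Returns the remaining wait, or -1 (-EBUSY in the ioctl) when the
	 * deadline recorded on the first call has already passed. */
	static long remaining_wait(unsigned long now, int *cookie_valid,
				   unsigned long *deadline)
	{
		if (!*cookie_valid) {
			*cookie_valid = 1;
			*deadline = now + TIMEOUT;
		}
		if (now >= *deadline)
			return -1;
		return *deadline - now;
	}

	int main(void)
	{
		int valid = 0;
		unsigned long deadline;

		printf("%ld\n", remaining_wait(1000, &valid, &deadline));	/* 300 */
		printf("%ld\n", remaining_wait(1100, &valid, &deadline));	/* 200 after a restart */
		printf("%ld\n", remaining_wait(1400, &valid, &deadline));	/* -1: budget spent */
		return 0;
	}
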
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 000000000000..31f9afed0a63
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,880 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30/* Might need a hrtimer here? */
31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
32
33
34void vmw_display_unit_cleanup(struct vmw_display_unit *du)
35{
36 if (du->cursor_surface)
37 vmw_surface_unreference(&du->cursor_surface);
38 if (du->cursor_dmabuf)
39 vmw_dmabuf_unreference(&du->cursor_dmabuf);
40 drm_crtc_cleanup(&du->crtc);
41 drm_encoder_cleanup(&du->encoder);
42 drm_connector_cleanup(&du->connector);
43}
44
45/*
46 * Display Unit Cursor functions
47 */
48
49int vmw_cursor_update_image(struct vmw_private *dev_priv,
50 u32 *image, u32 width, u32 height,
51 u32 hotspotX, u32 hotspotY)
52{
53 struct {
54 u32 cmd;
55 SVGAFifoCmdDefineAlphaCursor cursor;
56 } *cmd;
57 u32 image_size = width * height * 4;
58 u32 cmd_size = sizeof(*cmd) + image_size;
59
60 if (!image)
61 return -EINVAL;
62
63 cmd = vmw_fifo_reserve(dev_priv, cmd_size);
64 if (unlikely(cmd == NULL)) {
65 DRM_ERROR("Fifo reserve failed.\n");
66 return -ENOMEM;
67 }
68
69 memset(cmd, 0, sizeof(*cmd));
70
71 memcpy(&cmd[1], image, image_size);
72
73 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
74 cmd->cursor.id = cpu_to_le32(0);
75 cmd->cursor.width = cpu_to_le32(width);
76 cmd->cursor.height = cpu_to_le32(height);
77 cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
78 cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
79
80 vmw_fifo_commit(dev_priv, cmd_size);
81
82 return 0;
83}
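
The command reserved above is a fixed header followed immediately by a variable-sized payload; &cmd[1] points just past the header, which is where the image bytes are copied. A standalone model of that layout idiom, in userspace C with a made-up header struct rather than the SVGA ABI:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	struct cursor_cmd {
		uint32_t cmd;		/* command id */
		uint32_t width, height;
	};

	int main(void)
	{
		uint32_t image[4] = { 1, 2, 3, 4 };	/* stand-in 2x2 cursor */
		size_t total = sizeof(struct cursor_cmd) + sizeof(image);
		struct cursor_cmd *cmd = malloc(total);

		if (!cmd)
			return 1;
		cmd->cmd = 0;
		cmd->width = 2;
		cmd->height = 2;
		memcpy(&cmd[1], image, sizeof(image));	/* payload right after header */

		printf("first pixel = %u\n", (unsigned)((uint32_t *)&cmd[1])[0]);	/* prints 1 */
		free(cmd);
		return 0;
	}
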
84
85void vmw_cursor_update_position(struct vmw_private *dev_priv,
86 bool show, int x, int y)
87{
88 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
89 uint32_t count;
90
91 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
92 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
93 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
94 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
95 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
96}
97
98int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
99 uint32_t handle, uint32_t width, uint32_t height)
100{
101 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
102 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
103 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
104 struct vmw_surface *surface = NULL;
105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret;
107
108 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface);
111 if (!ret) {
112 if (!surface->snooper.image) {
 113				DRM_ERROR("surface not suitable for cursor\n");
				vmw_surface_unreference(&surface);
 114				return -EINVAL;
115 }
116 } else {
117 ret = vmw_user_dmabuf_lookup(tfile,
118 handle, &dmabuf);
119 if (ret) {
120 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
121 return -EINVAL;
122 }
123 }
124 }
125
126 /* takedown old cursor */
127 if (du->cursor_surface) {
128 du->cursor_surface->snooper.crtc = NULL;
129 vmw_surface_unreference(&du->cursor_surface);
130 }
131 if (du->cursor_dmabuf)
132 vmw_dmabuf_unreference(&du->cursor_dmabuf);
133
134 /* setup new image */
135 if (surface) {
136 /* vmw_user_surface_lookup takes one reference */
137 du->cursor_surface = surface;
138
139 du->cursor_surface->snooper.crtc = crtc;
140 du->cursor_age = du->cursor_surface->snooper.age;
141 vmw_cursor_update_image(dev_priv, surface->snooper.image,
142 64, 64, du->hotspot_x, du->hotspot_y);
143 } else if (dmabuf) {
144 struct ttm_bo_kmap_obj map;
145 unsigned long kmap_offset;
146 unsigned long kmap_num;
147 void *virtual;
148 bool dummy;
149
 150		/* vmw_user_dmabuf_lookup takes one reference */
151 du->cursor_dmabuf = dmabuf;
152
153 kmap_offset = 0;
154 kmap_num = (64*64*4) >> PAGE_SHIFT;
155
156 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
157 if (unlikely(ret != 0)) {
158 DRM_ERROR("reserve failed\n");
159 return -EINVAL;
160 }
161
162 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
163 if (unlikely(ret != 0))
164 goto err_unreserve;
165
166 virtual = ttm_kmap_obj_virtual(&map, &dummy);
167 vmw_cursor_update_image(dev_priv, virtual, 64, 64,
168 du->hotspot_x, du->hotspot_y);
169
170 ttm_bo_kunmap(&map);
171err_unreserve:
172 ttm_bo_unreserve(&dmabuf->base);
173
174 } else {
175 vmw_cursor_update_position(dev_priv, false, 0, 0);
176 return 0;
177 }
178
179 vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
180
181 return 0;
182}
183
184int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
185{
186 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
187 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
188 bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
189
190 du->cursor_x = x + crtc->x;
191 du->cursor_y = y + crtc->y;
192
193 vmw_cursor_update_position(dev_priv, shown,
194 du->cursor_x, du->cursor_y);
195
196 return 0;
197}
198
199void vmw_kms_cursor_snoop(struct vmw_surface *srf,
200 struct ttm_object_file *tfile,
201 struct ttm_buffer_object *bo,
202 SVGA3dCmdHeader *header)
203{
204 struct ttm_bo_kmap_obj map;
205 unsigned long kmap_offset;
206 unsigned long kmap_num;
207 SVGA3dCopyBox *box;
208 unsigned box_count;
209 void *virtual;
210 bool dummy;
211 struct vmw_dma_cmd {
212 SVGA3dCmdHeader header;
213 SVGA3dCmdSurfaceDMA dma;
214 } *cmd;
215 int ret;
216
217 cmd = container_of(header, struct vmw_dma_cmd, header);
218
219 /* No snooper installed */
220 if (!srf->snooper.image)
221 return;
222
223 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 224		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
225 return;
226 }
227
228 if (cmd->header.size < 64) {
229 DRM_ERROR("at least one full copy box must be given\n");
230 return;
231 }
232
233 box = (SVGA3dCopyBox *)&cmd[1];
234 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
235 sizeof(SVGA3dCopyBox);
236
237 if (cmd->dma.guest.pitch != (64 * 4) ||
238 cmd->dma.guest.ptr.offset % PAGE_SIZE ||
239 box->x != 0 || box->y != 0 || box->z != 0 ||
240 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
241 box->w != 64 || box->h != 64 || box->d != 1 ||
242 box_count != 1) {
 243		/* TODO handle non page aligned offsets */
 244		/* TODO handle partial uploads and pitch != 256 */
 245		/* TODO handle more than one copy (size != 64) */
 246		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
247 return;
248 }
249
250 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
251 kmap_num = (64*64*4) >> PAGE_SHIFT;
252
253 ret = ttm_bo_reserve(bo, true, false, false, 0);
254 if (unlikely(ret != 0)) {
255 DRM_ERROR("reserve failed\n");
256 return;
257 }
258
259 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
260 if (unlikely(ret != 0))
261 goto err_unreserve;
262
263 virtual = ttm_kmap_obj_virtual(&map, &dummy);
264
265 memcpy(srf->snooper.image, virtual, 64*64*4);
266 srf->snooper.age++;
267
 268	/* We can't update the cursor image from here, since execbuf has
 269	 * already reserved fifo space.
270 *
271 * if (srf->snooper.crtc)
272 * vmw_ldu_crtc_cursor_update_image(dev_priv,
273 * srf->snooper.image, 64, 64,
274 * du->hotspot_x, du->hotspot_y);
275 */
276
277 ttm_bo_kunmap(&map);
278err_unreserve:
279 ttm_bo_unreserve(bo);
280}
281
282void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
283{
284 struct drm_device *dev = dev_priv->dev;
285 struct vmw_display_unit *du;
286 struct drm_crtc *crtc;
287
288 mutex_lock(&dev->mode_config.mutex);
289
290 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
291 du = vmw_crtc_to_du(crtc);
292 if (!du->cursor_surface ||
293 du->cursor_age == du->cursor_surface->snooper.age)
294 continue;
295
296 du->cursor_age = du->cursor_surface->snooper.age;
297 vmw_cursor_update_image(dev_priv,
298 du->cursor_surface->snooper.image,
299 64, 64, du->hotspot_x, du->hotspot_y);
300 }
301
302 mutex_unlock(&dev->mode_config.mutex);
303}
304
305/*
306 * Generic framebuffer code
307 */
308
309int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
310 struct drm_file *file_priv,
311 unsigned int *handle)
312{
 313	if (handle)
 314		*handle = 0;
315
316 return 0;
317}
318
319/*
320 * Surface framebuffer code
321 */
322
323#define vmw_framebuffer_to_vfbs(x) \
324 container_of(x, struct vmw_framebuffer_surface, base.base)
325
326struct vmw_framebuffer_surface {
327 struct vmw_framebuffer base;
328 struct vmw_surface *surface;
329 struct delayed_work d_work;
330 struct mutex work_lock;
331 bool present_fs;
332};
333
334void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
335{
336 struct vmw_framebuffer_surface *vfb =
337 vmw_framebuffer_to_vfbs(framebuffer);
338
339 cancel_delayed_work_sync(&vfb->d_work);
340 drm_framebuffer_cleanup(framebuffer);
341 vmw_surface_unreference(&vfb->surface);
342
343 kfree(framebuffer);
344}
345
346static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
347{
348 struct delayed_work *d_work =
349 container_of(work, struct delayed_work, work);
350 struct vmw_framebuffer_surface *vfbs =
351 container_of(d_work, struct vmw_framebuffer_surface, d_work);
352 struct vmw_surface *surf = vfbs->surface;
353 struct drm_framebuffer *framebuffer = &vfbs->base.base;
354 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
355
356 struct {
357 SVGA3dCmdHeader header;
358 SVGA3dCmdPresent body;
359 SVGA3dCopyRect cr;
360 } *cmd;
361
362 mutex_lock(&vfbs->work_lock);
363 if (!vfbs->present_fs)
364 goto out_unlock;
365
366 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
367 if (unlikely(cmd == NULL))
368 goto out_resched;
369
370 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
371 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
372 cmd->body.sid = cpu_to_le32(surf->res.id);
373 cmd->cr.x = cpu_to_le32(0);
374 cmd->cr.y = cpu_to_le32(0);
375 cmd->cr.srcx = cmd->cr.x;
376 cmd->cr.srcy = cmd->cr.y;
377 cmd->cr.w = cpu_to_le32(framebuffer->width);
378 cmd->cr.h = cpu_to_le32(framebuffer->height);
379 vfbs->present_fs = false;
380 vmw_fifo_commit(dev_priv, sizeof(*cmd));
381out_resched:
382 /**
383 * Will not re-add if already pending.
384 */
385 schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
386out_unlock:
387 mutex_unlock(&vfbs->work_lock);
388}
389
390
391int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
392 unsigned flags, unsigned color,
393 struct drm_clip_rect *clips,
394 unsigned num_clips)
395{
396 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
397 struct vmw_framebuffer_surface *vfbs =
398 vmw_framebuffer_to_vfbs(framebuffer);
399 struct vmw_surface *surf = vfbs->surface;
400 struct drm_clip_rect norect;
401 SVGA3dCopyRect *cr;
402 int i, inc = 1;
403
404 struct {
405 SVGA3dCmdHeader header;
406 SVGA3dCmdPresent body;
407 SVGA3dCopyRect cr;
408 } *cmd;
409
410 if (!num_clips ||
411 !(dev_priv->fifo.capabilities &
412 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
413 int ret;
414
415 mutex_lock(&vfbs->work_lock);
416 vfbs->present_fs = true;
417 ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
418 mutex_unlock(&vfbs->work_lock);
419 if (ret) {
420 /**
 421			 * No work was pending; force an immediate present.
422 */
423 vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
424 }
425 return 0;
426 }
427
428 if (!num_clips) {
429 num_clips = 1;
430 clips = &norect;
431 norect.x1 = norect.y1 = 0;
432 norect.x2 = framebuffer->width;
433 norect.y2 = framebuffer->height;
434 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
435 num_clips /= 2;
436 inc = 2; /* skip source rects */
437 }
438
439 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
440 if (unlikely(cmd == NULL)) {
441 DRM_ERROR("Fifo reserve failed.\n");
442 return -ENOMEM;
443 }
444
445 memset(cmd, 0, sizeof(*cmd));
446
447 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
448 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
449 cmd->body.sid = cpu_to_le32(surf->res.id);
450
451 for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
452 cr->x = cpu_to_le16(clips->x1);
453 cr->y = cpu_to_le16(clips->y1);
454 cr->srcx = cr->x;
455 cr->srcy = cr->y;
456 cr->w = cpu_to_le16(clips->x2 - clips->x1);
457 cr->h = cpu_to_le16(clips->y2 - clips->y1);
458 }
459
460 vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
461
462 return 0;
463}
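
When userspace sets DRM_MODE_FB_DIRTY_ANNOTATE_COPY, the clip array carries destination/source rectangle pairs, and the dirty handlers in this file keep only the destinations by halving num_clips and stepping through the array by two. A standalone sketch of that walk, in userspace C with invented rectangle values:

	#include <stdio.h>

	struct clip {
		int x1, y1;
	};

	int main(void)
	{
		/* pairs: dst0, src0, dst1, src1 */
		struct clip clips[] = { {0, 0}, {64, 64}, {10, 20}, {74, 84} };
		unsigned num_clips = 4, inc = 2, i;
		const struct clip *c = clips;

		num_clips /= 2;			/* pairs -> destination count */
		for (i = 0; i < num_clips; i++, c += inc)
			printf("dst %u: (%d,%d)\n", i, c->x1, c->y1);
		/* prints dst 0: (0,0) and dst 1: (10,20) */
		return 0;
	}
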
464
465static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
466 .destroy = vmw_framebuffer_surface_destroy,
467 .dirty = vmw_framebuffer_surface_dirty,
468 .create_handle = vmw_framebuffer_create_handle,
469};
470
471int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
472 struct vmw_surface *surface,
473 struct vmw_framebuffer **out,
474 unsigned width, unsigned height)
475
476{
477 struct drm_device *dev = dev_priv->dev;
478 struct vmw_framebuffer_surface *vfbs;
479 int ret;
480
481 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
482 if (!vfbs) {
483 ret = -ENOMEM;
484 goto out_err1;
485 }
486
487 ret = drm_framebuffer_init(dev, &vfbs->base.base,
488 &vmw_framebuffer_surface_funcs);
489 if (ret)
490 goto out_err2;
491
492 if (!vmw_surface_reference(surface)) {
 493		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
 494		goto out_err3;
495 }
496
497 /* XXX get the first 3 from the surface info */
498 vfbs->base.base.bits_per_pixel = 32;
 499	vfbs->base.base.pitch = width * 32 / 8;
500 vfbs->base.base.depth = 24;
501 vfbs->base.base.width = width;
502 vfbs->base.base.height = height;
503 vfbs->base.pin = NULL;
504 vfbs->base.unpin = NULL;
505 vfbs->surface = surface;
506 mutex_init(&vfbs->work_lock);
507 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
508 *out = &vfbs->base;
509
510 return 0;
511
512out_err3:
513 drm_framebuffer_cleanup(&vfbs->base.base);
514out_err2:
515 kfree(vfbs);
516out_err1:
517 return ret;
518}
519
520/*
521 * Dmabuf framebuffer code
522 */
523
524#define vmw_framebuffer_to_vfbd(x) \
525 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
526
527struct vmw_framebuffer_dmabuf {
528 struct vmw_framebuffer base;
529 struct vmw_dma_buffer *buffer;
530};
531
532void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
533{
534 struct vmw_framebuffer_dmabuf *vfbd =
535 vmw_framebuffer_to_vfbd(framebuffer);
536
537 drm_framebuffer_cleanup(framebuffer);
538 vmw_dmabuf_unreference(&vfbd->buffer);
539
540 kfree(vfbd);
541}
542
543int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
544 unsigned flags, unsigned color,
545 struct drm_clip_rect *clips,
546 unsigned num_clips)
547{
548 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
549 struct drm_clip_rect norect;
550 struct {
551 uint32_t header;
552 SVGAFifoCmdUpdate body;
553 } *cmd;
554 int i, increment = 1;
555
556 if (!num_clips) {
557 num_clips = 1;
558 clips = &norect;
559 norect.x1 = norect.y1 = 0;
560 norect.x2 = framebuffer->width;
561 norect.y2 = framebuffer->height;
562 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
563 num_clips /= 2;
564 increment = 2;
565 }
566
567 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
568 if (unlikely(cmd == NULL)) {
569 DRM_ERROR("Fifo reserve failed.\n");
570 return -ENOMEM;
571 }
572
573 for (i = 0; i < num_clips; i++, clips += increment) {
574 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
575 cmd[i].body.x = cpu_to_le32(clips->x1);
576 cmd[i].body.y = cpu_to_le32(clips->y1);
577 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
578 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
579 }
580
581 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
582
583 return 0;
584}
585
586static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
587 .destroy = vmw_framebuffer_dmabuf_destroy,
588 .dirty = vmw_framebuffer_dmabuf_dirty,
589 .create_handle = vmw_framebuffer_create_handle,
590};
591
592static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
593{
594 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
595 struct vmw_framebuffer_dmabuf *vfbd =
596 vmw_framebuffer_to_vfbd(&vfb->base);
597 int ret;
598
599 vmw_overlay_pause_all(dev_priv);
600
601 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
602
603 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
604 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
605 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
606 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
607 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
608 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
609 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
610 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
611 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
612
613 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
614 vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
615 vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
616 vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
617 vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
618 vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
619 vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
620 vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
621 } else
622 WARN_ON(true);
623
624 vmw_overlay_resume_all(dev_priv);
625
 626	return ret;
627}
628
629static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
630{
631 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
632 struct vmw_framebuffer_dmabuf *vfbd =
633 vmw_framebuffer_to_vfbd(&vfb->base);
634
635 if (!vfbd->buffer) {
636 WARN_ON(!vfbd->buffer);
637 return 0;
638 }
639
640 return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
641}
642
643int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
644 struct vmw_dma_buffer *dmabuf,
645 struct vmw_framebuffer **out,
646 unsigned width, unsigned height)
647
648{
649 struct drm_device *dev = dev_priv->dev;
650 struct vmw_framebuffer_dmabuf *vfbd;
651 int ret;
652
653 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
654 if (!vfbd) {
655 ret = -ENOMEM;
656 goto out_err1;
657 }
658
659 ret = drm_framebuffer_init(dev, &vfbd->base.base,
660 &vmw_framebuffer_dmabuf_funcs);
661 if (ret)
662 goto out_err2;
663
664 if (!vmw_dmabuf_reference(dmabuf)) {
 665		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
 666		goto out_err3;
667 }
668
 669	/* XXX get the first 3 from the buffer info */
670 vfbd->base.base.bits_per_pixel = 32;
 671	vfbd->base.base.pitch = width * 32 / 8;
672 vfbd->base.base.depth = 24;
673 vfbd->base.base.width = width;
674 vfbd->base.base.height = height;
675 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
676 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
677 vfbd->buffer = dmabuf;
678 *out = &vfbd->base;
679
680 return 0;
681
682out_err3:
683 drm_framebuffer_cleanup(&vfbd->base.base);
684out_err2:
685 kfree(vfbd);
686out_err1:
687 return ret;
688}
689
690/*
691 * Generic Kernel modesetting functions
692 */
693
694static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
695 struct drm_file *file_priv,
696 struct drm_mode_fb_cmd *mode_cmd)
697{
698 struct vmw_private *dev_priv = vmw_priv(dev);
699 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
700 struct vmw_framebuffer *vfb = NULL;
701 struct vmw_surface *surface = NULL;
702 struct vmw_dma_buffer *bo = NULL;
703 int ret;
704
705 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
706 mode_cmd->handle, &surface);
707 if (ret)
708 goto try_dmabuf;
709
710 if (!surface->scanout)
711 goto err_not_scanout;
712
713 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
714 mode_cmd->width, mode_cmd->height);
715
716 /* vmw_user_surface_lookup takes one ref so does new_fb */
717 vmw_surface_unreference(&surface);
718
719 if (ret) {
720 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
721 return NULL;
722 }
723 return &vfb->base;
724
725try_dmabuf:
726 DRM_INFO("%s: trying buffer\n", __func__);
727
728 ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
729 if (ret) {
730 DRM_ERROR("failed to find buffer: %i\n", ret);
731 return NULL;
732 }
733
734 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
735 mode_cmd->width, mode_cmd->height);
736
737 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
738 vmw_dmabuf_unreference(&bo);
739
740 if (ret) {
741 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
742 return NULL;
743 }
744
745 return &vfb->base;
746
747err_not_scanout:
748 DRM_ERROR("surface not marked as scanout\n");
749 /* vmw_user_surface_lookup takes one ref */
750 vmw_surface_unreference(&surface);
751
752 return NULL;
753}
754
755static int vmw_kms_fb_changed(struct drm_device *dev)
756{
757 return 0;
758}
759
760static struct drm_mode_config_funcs vmw_kms_funcs = {
761 .fb_create = vmw_kms_fb_create,
762 .fb_changed = vmw_kms_fb_changed,
763};
764
765int vmw_kms_init(struct vmw_private *dev_priv)
766{
767 struct drm_device *dev = dev_priv->dev;
768 int ret;
769
770 drm_mode_config_init(dev);
771 dev->mode_config.funcs = &vmw_kms_funcs;
772 dev->mode_config.min_width = 1;
773 dev->mode_config.min_height = 1;
774 dev->mode_config.max_width = dev_priv->fb_max_width;
775 dev->mode_config.max_height = dev_priv->fb_max_height;
776
777 ret = vmw_kms_init_legacy_display_system(dev_priv);
778
 779	return ret;
780}
781
782int vmw_kms_close(struct vmw_private *dev_priv)
783{
784 /*
 785	 * The docs say we should take the lock before calling this function,
 786	 * but since it destroys encoders, and our destructor calls
 787	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
788 */
789 drm_mode_config_cleanup(dev_priv->dev);
790 vmw_kms_close_legacy_display_system(dev_priv);
791 return 0;
792}
793
794int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
795 struct drm_file *file_priv)
796{
797 struct drm_vmw_cursor_bypass_arg *arg = data;
798 struct vmw_display_unit *du;
799 struct drm_mode_object *obj;
800 struct drm_crtc *crtc;
801 int ret = 0;
802
803
804 mutex_lock(&dev->mode_config.mutex);
805 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
806
807 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
808 du = vmw_crtc_to_du(crtc);
809 du->hotspot_x = arg->xhot;
810 du->hotspot_y = arg->yhot;
811 }
812
813 mutex_unlock(&dev->mode_config.mutex);
814 return 0;
815 }
816
817 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
818 if (!obj) {
819 ret = -EINVAL;
820 goto out;
821 }
822
823 crtc = obj_to_crtc(obj);
824 du = vmw_crtc_to_du(crtc);
825
826 du->hotspot_x = arg->xhot;
827 du->hotspot_y = arg->yhot;
828
829out:
830 mutex_unlock(&dev->mode_config.mutex);
831
832 return ret;
833}
834
835int vmw_kms_save_vga(struct vmw_private *vmw_priv)
836{
837 /*
 838	 * Set up a single multimon monitor with a size of
 839	 * 0x0; this stops the UI from resizing when we
 840	 * change the framebuffer size.
841 */
842 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
843 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
844 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
845 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
846 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
847 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
848 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
849 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
850 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
851 }
852
853 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
854 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
855 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
856 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
857 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
858 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
859 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
860 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
861
862 return 0;
863}
864
865int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
866{
867 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
868 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
869 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
870 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
871 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
872 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
873 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
874 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
875
876 /* TODO check for multimon */
877 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
878
879 return 0;
880}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 000000000000..8b95249f0531
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef VMWGFX_KMS_H_
29#define VMWGFX_KMS_H_
30
31#include "drmP.h"
32#include "vmwgfx_drv.h"
33
34
35#define vmw_framebuffer_to_vfb(x) \
36 container_of(x, struct vmw_framebuffer, base)
37
38/**
39 * Base class for framebuffers
40 *
 41 * @pin is called whenever a crtc uses this framebuffer
 42 * @unpin is called when the framebuffer is no longer used by any crtc
43 */
44struct vmw_framebuffer {
45 struct drm_framebuffer base;
46 int (*pin)(struct vmw_framebuffer *fb);
47 int (*unpin)(struct vmw_framebuffer *fb);
48};
49
50
51#define vmw_crtc_to_du(x) \
52 container_of(x, struct vmw_display_unit, crtc)
53
54/*
55 * Basic cursor manipulation
56 */
57int vmw_cursor_update_image(struct vmw_private *dev_priv,
58 u32 *image, u32 width, u32 height,
59 u32 hotspotX, u32 hotspotY);
60void vmw_cursor_update_position(struct vmw_private *dev_priv,
61 bool show, int x, int y);
62
63/**
64 * Base class display unit.
65 *
 66 * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
 67 * the display unit is all of them at the same time. This is true for both
68 * legacy multimon and screen objects.
69 */
70struct vmw_display_unit {
71 struct drm_crtc crtc;
72 struct drm_encoder encoder;
73 struct drm_connector connector;
74
75 struct vmw_surface *cursor_surface;
76 struct vmw_dma_buffer *cursor_dmabuf;
77 size_t cursor_age;
78
79 int cursor_x;
80 int cursor_y;
81
82 int hotspot_x;
83 int hotspot_y;
84
85 unsigned unit;
86};
87
88/*
89 * Shared display unit functions - vmwgfx_kms.c
90 */
91void vmw_display_unit_cleanup(struct vmw_display_unit *du);
92int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
93 uint32_t handle, uint32_t width, uint32_t height);
94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
95
96/*
 97 * Legacy display unit functions - vmwgfx_ldu.c
98 */
99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
101
102#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 000000000000..90891593bf6c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \
33 container_of(x, struct vmw_legacy_display_unit, base.encoder)
34#define vmw_connector_to_ldu(x) \
35 container_of(x, struct vmw_legacy_display_unit, base.connector)
36
37struct vmw_legacy_display {
38 struct list_head active;
39
40 unsigned num_active;
41
42 struct vmw_framebuffer *fb;
43};
44
45/**
46 * Display unit using the legacy register interface.
47 */
48struct vmw_legacy_display_unit {
49 struct vmw_display_unit base;
50
51 struct list_head active;
52
53 unsigned unit;
54};
55
56static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
57{
58 list_del_init(&ldu->active);
59 vmw_display_unit_cleanup(&ldu->base);
60 kfree(ldu);
61}
62
63
64/*
65 * Legacy Display Unit CRTC functions
66 */
67
68static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
69{
70}
71
72static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
73{
74}
75
76static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
77 u16 *r, u16 *g, u16 *b,
78 uint32_t size)
79{
80}
81
82static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
83{
84 vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
85}
86
87static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
88{
89 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
90 struct vmw_legacy_display_unit *entry;
91 struct drm_crtc *crtc;
92 int i = 0;
93
94 /* to stop the screen from changing size on resize */
95 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
96 for (i = 0; i < lds->num_active; i++) {
97 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
98 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
99 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
100 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
101 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
102 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
103 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
104 }
105
106 /* Now set the mode */
107 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
108 i = 0;
109 list_for_each_entry(entry, &lds->active, active) {
110 crtc = &entry->base.crtc;
111
112 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
113 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
114 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
115 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
116 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
117 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
118 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
119
120 i++;
121 }
122
123 return 0;
124}
125
126static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
127 struct vmw_legacy_display_unit *ldu)
128{
129 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
130 if (list_empty(&ldu->active))
131 return 0;
132
133 list_del_init(&ldu->active);
134 if (--(ld->num_active) == 0) {
135 BUG_ON(!ld->fb);
136 if (ld->fb->unpin)
137 ld->fb->unpin(ld->fb);
138 ld->fb = NULL;
139 }
140
141 return 0;
142}
143
144static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
145 struct vmw_legacy_display_unit *ldu,
146 struct vmw_framebuffer *vfb)
147{
148 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
149 struct vmw_legacy_display_unit *entry;
150 struct list_head *at;
151
152 if (!list_empty(&ldu->active))
153 return 0;
154
155 at = &ld->active;
156 list_for_each_entry(entry, &ld->active, active) {
157 if (entry->unit > ldu->unit)
158 break;
159
160 at = &entry->active;
161 }
162
163 list_add(&ldu->active, at);
164 if (ld->num_active++ == 0) {
165 BUG_ON(ld->fb);
166 if (vfb->pin)
167 vfb->pin(vfb);
168 ld->fb = vfb;
169 }
170
171 return 0;
172}
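
vmw_ldu_add_active keeps the active list sorted by unit number: the walk remembers the last entry whose unit is not greater than the new one, then links the new unit right after it. A standalone model using a plain singly-linked list, in userspace C rather than the kernel list_head API:

	#include <stdio.h>

	struct node {
		unsigned unit;
		struct node *next;
	};

	/* Insert 'elem' after the last node whose unit is <= elem->unit. */
	static void insert_sorted(struct node *head, struct node *elem)
	{
		struct node *at = head;	/* dummy anchor, like a list_head */

		while (at->next && at->next->unit <= elem->unit)
			at = at->next;
		elem->next = at->next;
		at->next = elem;
	}

	int main(void)
	{
		struct node head = { 0, NULL };
		struct node a = { 2, NULL }, b = { 0, NULL }, c = { 1, NULL };
		struct node *n;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		insert_sorted(&head, &c);
		for (n = head.next; n; n = n->next)
			printf("%u ", n->unit);		/* prints: 0 1 2 */
		printf("\n");
		return 0;
	}
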
173
174static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
175{
176 struct vmw_private *dev_priv;
177 struct vmw_legacy_display_unit *ldu;
178 struct drm_connector *connector;
179 struct drm_display_mode *mode;
180 struct drm_encoder *encoder;
181 struct vmw_framebuffer *vfb;
182 struct drm_framebuffer *fb;
183 struct drm_crtc *crtc;
184
185 if (!set)
186 return -EINVAL;
187
188 if (!set->crtc)
189 return -EINVAL;
190
191 /* get the ldu */
192 crtc = set->crtc;
193 ldu = vmw_crtc_to_ldu(crtc);
194 vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
195 dev_priv = vmw_priv(crtc->dev);
196
197 if (set->num_connectors > 1) {
 198		DRM_ERROR("too many connectors\n");
199 return -EINVAL;
200 }
201
202 if (set->num_connectors == 1 &&
203 set->connectors[0] != &ldu->base.connector) {
204 DRM_ERROR("connector doesn't match %p %p\n",
205 set->connectors[0], &ldu->base.connector);
206 return -EINVAL;
207 }
208
 209	/* ldu only supports one fb active at a time */
210 if (dev_priv->ldu_priv->fb && vfb &&
211 dev_priv->ldu_priv->fb != vfb) {
212 DRM_ERROR("Multiple framebuffers not supported\n");
213 return -EINVAL;
214 }
215
216 /* since they always map one to one these are safe */
217 connector = &ldu->base.connector;
218 encoder = &ldu->base.encoder;
219
220 /* should we turn the crtc off? */
221 if (set->num_connectors == 0 || !set->mode || !set->fb) {
222
223 connector->encoder = NULL;
224 encoder->crtc = NULL;
225 crtc->fb = NULL;
226
227 vmw_ldu_del_active(dev_priv, ldu);
228
229 vmw_ldu_commit_list(dev_priv);
230
231 return 0;
232 }
233
234
235 /* we now know we want to set a mode */
236 mode = set->mode;
237 fb = set->fb;
238
239 if (set->x + mode->hdisplay > fb->width ||
240 set->y + mode->vdisplay > fb->height) {
241 DRM_ERROR("set outside of framebuffer\n");
242 return -EINVAL;
243 }
244
245 vmw_fb_off(dev_priv);
246
247 crtc->fb = fb;
248 encoder->crtc = crtc;
249 connector->encoder = encoder;
250 crtc->x = set->x;
251 crtc->y = set->y;
252 crtc->mode = *mode;
253
254 vmw_ldu_add_active(dev_priv, ldu, vfb);
255
256 vmw_ldu_commit_list(dev_priv);
257
258 return 0;
259}
260
261static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
262 .save = vmw_ldu_crtc_save,
263 .restore = vmw_ldu_crtc_restore,
264 .cursor_set = vmw_du_crtc_cursor_set,
265 .cursor_move = vmw_du_crtc_cursor_move,
266 .gamma_set = vmw_ldu_crtc_gamma_set,
267 .destroy = vmw_ldu_crtc_destroy,
268 .set_config = vmw_ldu_crtc_set_config,
269};
270
271/*
272 * Legacy Display Unit encoder functions
273 */
274
275static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
276{
277 vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
278}
279
280static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
281 .destroy = vmw_ldu_encoder_destroy,
282};
283
284/*
285 * Legacy Display Unit connector functions
286 */
287
288static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
289{
290}
291
292static void vmw_ldu_connector_save(struct drm_connector *connector)
293{
294}
295
296static void vmw_ldu_connector_restore(struct drm_connector *connector)
297{
298}
299
300static enum drm_connector_status
301 vmw_ldu_connector_detect(struct drm_connector *connector)
302{
303 /* XXX vmwctrl should control connection status */
304 if (vmw_connector_to_ldu(connector)->base.unit == 0)
305 return connector_status_connected;
306 return connector_status_disconnected;
307}
308
309static struct drm_display_mode vmw_ldu_connector_builtin[] = {
310 /* 640x480@60Hz */
311 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
312 752, 800, 0, 480, 489, 492, 525, 0,
313 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
314 /* 800x600@60Hz */
315 { DRM_MODE("800x600",
316 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
317 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
318 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
319 /* 1024x768@60Hz */
320 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
321 1184, 1344, 0, 768, 771, 777, 806, 0,
322 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
323 /* 1152x864@75Hz */
324 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
325 1344, 1600, 0, 864, 865, 868, 900, 0,
326 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
327 /* 1280x768@60Hz */
328 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
329 1472, 1664, 0, 768, 771, 778, 798, 0,
330 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
331 /* 1280x800@60Hz */
332 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
333 1480, 1680, 0, 800, 803, 809, 831, 0,
334 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
335 /* 1280x960@60Hz */
336 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
337 1488, 1800, 0, 960, 961, 964, 1000, 0,
338 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
339 /* 1280x1024@60Hz */
340 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
341 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
342 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
343 /* 1360x768@60Hz */
344 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
345 1536, 1792, 0, 768, 771, 777, 795, 0,
346 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 347	/* 1400x1050@60Hz */
348 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
349 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
350 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
351 /* 1440x900@60Hz */
352 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
353 1672, 1904, 0, 900, 903, 909, 934, 0,
354 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
355 /* 1600x1200@60Hz */
356 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
357 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
358 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
359 /* 1680x1050@60Hz */
360 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
361 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
362 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
363 /* 1792x1344@60Hz */
364 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
365 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
366 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 367	/* 1856x1392@60Hz */
368 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
369 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
370 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
371 /* 1920x1200@60Hz */
372 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
373 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
374 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
375 /* 1920x1440@60Hz */
376 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
377 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
378 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
379 /* 2560x1600@60Hz */
380 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
381 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
382 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
383 /* Terminate */
384 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
385};
386
387static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
388 uint32_t max_width, uint32_t max_height)
389{
390 struct drm_device *dev = connector->dev;
391 struct drm_display_mode *mode = NULL;
392 int i;
393
394 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
395 if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
396 vmw_ldu_connector_builtin[i].vdisplay > max_height)
397 continue;
398
399 mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
400 if (!mode)
401 return 0;
402 mode->vrefresh = drm_mode_vrefresh(mode);
403
404 drm_mode_probed_add(connector, mode);
405 }
406
407 drm_mode_connector_list_update(connector);
408
409 return 1;
410}
411
412static int vmw_ldu_connector_set_property(struct drm_connector *connector,
413 struct drm_property *property,
414 uint64_t val)
415{
416 return 0;
417}
418
419static void vmw_ldu_connector_destroy(struct drm_connector *connector)
420{
421 vmw_ldu_destroy(vmw_connector_to_ldu(connector));
422}
423
424static struct drm_connector_funcs vmw_legacy_connector_funcs = {
425 .dpms = vmw_ldu_connector_dpms,
426 .save = vmw_ldu_connector_save,
427 .restore = vmw_ldu_connector_restore,
428 .detect = vmw_ldu_connector_detect,
429 .fill_modes = vmw_ldu_connector_fill_modes,
430 .set_property = vmw_ldu_connector_set_property,
431 .destroy = vmw_ldu_connector_destroy,
432};
433
434static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
435{
436 struct vmw_legacy_display_unit *ldu;
437 struct drm_device *dev = dev_priv->dev;
438 struct drm_connector *connector;
439 struct drm_encoder *encoder;
440 struct drm_crtc *crtc;
441
442 ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
443 if (!ldu)
444 return -ENOMEM;
445
446 ldu->unit = unit;
447 crtc = &ldu->base.crtc;
448 encoder = &ldu->base.encoder;
449 connector = &ldu->base.connector;
450
451 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
452 DRM_MODE_CONNECTOR_LVDS);
453 /* Initial status */
454 if (unit == 0)
455 connector->status = connector_status_connected;
456 else
457 connector->status = connector_status_disconnected;
458
459 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
460 DRM_MODE_ENCODER_LVDS);
461 drm_mode_connector_attach_encoder(connector, encoder);
462 encoder->possible_crtcs = (1 << unit);
463 encoder->possible_clones = 0;
464
465 INIT_LIST_HEAD(&ldu->active);
466
467 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
468
469 drm_connector_attach_property(connector,
470 dev->mode_config.dirty_info_property,
471 1);
472
473 return 0;
474}
475
476int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
477{
478 if (dev_priv->ldu_priv) {
479 DRM_INFO("ldu system already on\n");
480 return -EINVAL;
481 }
482
483 dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
484
485 if (!dev_priv->ldu_priv)
486 return -ENOMEM;
487
488 INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
489 dev_priv->ldu_priv->num_active = 0;
490 dev_priv->ldu_priv->fb = NULL;
491
492 drm_mode_create_dirty_info_property(dev_priv->dev);
493
494 vmw_ldu_init(dev_priv, 0);
495 vmw_ldu_init(dev_priv, 1);
496 vmw_ldu_init(dev_priv, 2);
497 vmw_ldu_init(dev_priv, 3);
498 vmw_ldu_init(dev_priv, 4);
499 vmw_ldu_init(dev_priv, 5);
500 vmw_ldu_init(dev_priv, 6);
501 vmw_ldu_init(dev_priv, 7);
502
503 return 0;
504}
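/*
 * The eight explicit vmw_ldu_init() calls above could equally be a loop;
 * a minimal sketch, where VMWGFX_NUM_LDU is a hypothetical constant
 * (equal to 8 here) that this patch does not define:
 *
 *	int i;
 *
 *	for (i = 0; i < VMWGFX_NUM_LDU; ++i)
 *		vmw_ldu_init(dev_priv, i);
 */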
505
506int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
507{
508 if (!dev_priv->ldu_priv)
509 return -ENOSYS;
510
511 BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
512
513 kfree(dev_priv->ldu_priv);
514
515 return 0;
516}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 000000000000..5b6eabeb7f51
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,625 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#include "svga_overlay.h"
35#include "svga_escape.h"
36
37#define VMW_MAX_NUM_STREAMS 1
38
39struct vmw_stream {
40 struct vmw_dma_buffer *buf;
41 bool claimed;
42 bool paused;
43 struct drm_vmw_control_stream_arg saved;
44};
45
46/**
47 * Overlay control
48 */
49struct vmw_overlay {
50 /*
51 * Each stream is a single overlay. In Xv these are called ports.
52 */
53 struct mutex mutex;
54 struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
55};
56
57static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
58{
59 struct vmw_private *dev_priv = vmw_priv(dev);
60 return dev_priv ? dev_priv->overlay_priv : NULL;
61}
62
63struct vmw_escape_header {
64 uint32_t cmd;
65 SVGAFifoCmdEscape body;
66};
67
68struct vmw_escape_video_flush {
69 struct vmw_escape_header escape;
70 SVGAEscapeVideoFlush flush;
71};
72
73static inline void fill_escape(struct vmw_escape_header *header,
74 uint32_t size)
75{
76 header->cmd = SVGA_CMD_ESCAPE;
77 header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
78 header->body.size = size;
79}
80
81static inline void fill_flush(struct vmw_escape_video_flush *cmd,
82 uint32_t stream_id)
83{
84 fill_escape(&cmd->escape, sizeof(cmd->flush));
85 cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
86 cmd->flush.streamId = stream_id;
87}
88
89/**
90 * Pin or unpin a buffer in vram.
91 *
92 * @dev_priv: Driver private.
93 * @buf: DMA buffer to pin or unpin.
94 * @pin: Pin buffer in vram if true.
95 * @interruptible: Use interruptible wait.
96 *
97 * Takes the current master's ttm lock in read mode.
98 *
99 * Returns
100 * -ERESTARTSYS if interrupted by a signal.
101 */
102static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
103 struct vmw_dma_buffer *buf,
104 bool pin, bool interruptible)
105{
106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_placement *overlay_placement = &vmw_vram_placement;
108 int ret;
109
110 ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
111 if (unlikely(ret != 0))
112 return ret;
113
114 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
115 if (unlikely(ret != 0))
116 goto err;
117
118 if (pin)
119 overlay_placement = &vmw_vram_ne_placement;
120
121 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
122
123 ttm_bo_unreserve(bo);
124
125err:
126 ttm_read_unlock(&dev_priv->active_master->lock);
127
128 return ret;
129}
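/*
 * Typical usage, mirroring the stream code further down in this file
 * (a sketch of the calling convention only, not additional driver logic):
 *
 *	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
 *	if (ret)
 *		return ret;
 *	...program the stream while the buffer is pinned...
 *	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, false, interruptible);
 */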
130
131/**
132 * Send put command to hw.
133 *
134 * Returns
135 * -ERESTARTSYS if interrupted by a signal.
136 */
137static int vmw_overlay_send_put(struct vmw_private *dev_priv,
138 struct vmw_dma_buffer *buf,
139 struct drm_vmw_control_stream_arg *arg,
140 bool interruptible)
141{
142 struct {
143 struct vmw_escape_header escape;
144 struct {
145 struct {
146 uint32_t cmdType;
147 uint32_t streamId;
148 } header;
149 struct {
150 uint32_t registerId;
151 uint32_t value;
152 } items[SVGA_VIDEO_PITCH_3 + 1];
153 } body;
154 struct vmw_escape_video_flush flush;
155 } *cmds;
156 uint32_t offset;
157 int i, ret;
158
159 for (;;) {
160 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
161 if (cmds)
162 break;
163
164 ret = vmw_fallback_wait(dev_priv, false, true, 0,
165 interruptible, 3*HZ);
166 if (interruptible && ret == -ERESTARTSYS)
167 return ret;
168 else
169 BUG_ON(ret != 0);
170 }
171
172 fill_escape(&cmds->escape, sizeof(cmds->body));
173 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
174 cmds->body.header.streamId = arg->stream_id;
175
176 for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
177 cmds->body.items[i].registerId = i;
178
179 offset = buf->base.offset + arg->offset;
180
181 cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
182 cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
183 cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
184 cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
185 cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
186 cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
187 cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
188 cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
189 cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
190 cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
191 cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
192 cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
193 cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
194 cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
195 cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
196 cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
197 cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
198 cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
199 cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
200
201 fill_flush(&cmds->flush, arg->stream_id);
202
203 vmw_fifo_commit(dev_priv, sizeof(*cmds));
204
205 return 0;
206}
207
208/**
209 * Send stop command to hw.
210 *
211 * Returns
212 * -ERESTARTSYS if interrupted by a signal.
213 */
214static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
215 uint32_t stream_id,
216 bool interruptible)
217{
218 struct {
219 struct vmw_escape_header escape;
220 SVGAEscapeVideoSetRegs body;
221 struct vmw_escape_video_flush flush;
222 } *cmds;
223 int ret;
224
225 for (;;) {
226 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
227 if (cmds)
228 break;
229
230 ret = vmw_fallback_wait(dev_priv, false, true, 0,
231 interruptible, 3*HZ);
232 if (interruptible && ret == -ERESTARTSYS)
233 return ret;
234 else
235 BUG_ON(ret != 0);
236 }
237
238 fill_escape(&cmds->escape, sizeof(cmds->body));
239 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
240 cmds->body.header.streamId = stream_id;
241 cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
242 cmds->body.items[0].value = false;
243 fill_flush(&cmds->flush, stream_id);
244
245 vmw_fifo_commit(dev_priv, sizeof(*cmds));
246
247 return 0;
248}
249
250/**
251 * Stop or pause a stream.
252 *
253 * If the stream is paused, the no-evict flag is removed from the buffer,
254 * but the buffer is left in vram. This allows, for instance, mode_set
255 * to evict it should it need to.
256 *
257 * The caller must hold the overlay lock.
258 *
259 * @stream_id which stream to stop/pause.
260 * @pause true to pause, false to stop completely.
261 */
262static int vmw_overlay_stop(struct vmw_private *dev_priv,
263 uint32_t stream_id, bool pause,
264 bool interruptible)
265{
266 struct vmw_overlay *overlay = dev_priv->overlay_priv;
267 struct vmw_stream *stream = &overlay->stream[stream_id];
268 int ret;
269
270 /* No buffer attached means the stream is already completely stopped */
271 if (!stream->buf)
272 return 0;
273
274 /* If the stream is paused this is already done */
275 if (!stream->paused) {
276 ret = vmw_overlay_send_stop(dev_priv, stream_id,
277 interruptible);
278 if (ret)
279 return ret;
280
281 /* We just remove the NO_EVICT flag so no -ENOMEM */
282 ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
283 interruptible);
284 if (interruptible && ret == -ERESTARTSYS)
285 return ret;
286 else
287 BUG_ON(ret != 0);
288 }
289
290 if (!pause) {
291 vmw_dmabuf_unreference(&stream->buf);
292 stream->paused = false;
293 } else {
294 stream->paused = true;
295 }
296
297 return 0;
298}
299
300/**
301 * Update a stream and send any put or stop fifo commands needed.
302 *
303 * The caller must hold the overlay lock.
304 *
305 * Returns
306 * -ENOMEM if buffer doesn't fit in vram.
307 * -ERESTARTSYS if interrupted.
308 */
309static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
310 struct vmw_dma_buffer *buf,
311 struct drm_vmw_control_stream_arg *arg,
312 bool interruptible)
313{
314 struct vmw_overlay *overlay = dev_priv->overlay_priv;
315 struct vmw_stream *stream = &overlay->stream[arg->stream_id];
316 int ret = 0;
317
318 if (!buf)
319 return -EINVAL;
320
321 DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
322 stream->buf, buf, stream->paused ? "" : "not ");
323
324 if (stream->buf != buf) {
325 ret = vmw_overlay_stop(dev_priv, arg->stream_id,
326 false, interruptible);
327 if (ret)
328 return ret;
329 } else if (!stream->paused) {
330 /* If the buffers match and the stream is not paused, just send
331 * the put command; there is no need to do anything else.
332 */
333 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
334 if (ret == 0)
335 stream->saved = *arg;
336 else
337 BUG_ON(!interruptible);
338
339 return ret;
340 }
341
342 /* We don't start the old stream if we are interrupted.
343 * Might return -ENOMEM if it can't fit the buffer in vram.
344 */
345 ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
346 if (ret)
347 return ret;
348
349 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
350 if (ret) {
351 /* This one needs to happen no matter what. We only remove
352 * the NO_EVICT flag so this is safe from -ENOMEM.
353 */
354 BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
355 return ret;
356 }
357
358 if (stream->buf != buf)
359 stream->buf = vmw_dmabuf_reference(buf);
360 stream->saved = *arg;
361
362 return 0;
363}
364
365/**
366 * Stop all streams.
367 *
368 * Used by the fb code when starting.
369 *
370 * Takes the overlay lock.
371 */
372int vmw_overlay_stop_all(struct vmw_private *dev_priv)
373{
374 struct vmw_overlay *overlay = dev_priv->overlay_priv;
375 int i, ret;
376
377 if (!overlay)
378 return 0;
379
380 mutex_lock(&overlay->mutex);
381
382 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
383 struct vmw_stream *stream = &overlay->stream[i];
384 if (!stream->buf)
385 continue;
386
387 ret = vmw_overlay_stop(dev_priv, i, false, false);
388 WARN_ON(ret != 0);
389 }
390
391 mutex_unlock(&overlay->mutex);
392
393 return 0;
394}
395
396/**
397 * Try to resume all paused streams.
398 *
399 * Used by the kms code after moving a new scanout buffer to vram.
400 *
401 * Takes the overlay lock.
402 */
403int vmw_overlay_resume_all(struct vmw_private *dev_priv)
404{
405 struct vmw_overlay *overlay = dev_priv->overlay_priv;
406 int i, ret;
407
408 if (!overlay)
409 return 0;
410
411 mutex_lock(&overlay->mutex);
412
413 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
414 struct vmw_stream *stream = &overlay->stream[i];
415 if (!stream->paused)
416 continue;
417
418 ret = vmw_overlay_update_stream(dev_priv, stream->buf,
419 &stream->saved, false);
420 if (ret != 0)
421 DRM_INFO("%s: *warning* failed to resume stream %i\n",
422 __func__, i);
423 }
424
425 mutex_unlock(&overlay->mutex);
426
427 return 0;
428}
429
430/**
431 * Pauses all active streams.
432 *
433 * Used by the kms code when moving a new scanout buffer to vram.
434 *
435 * Takes the overlay lock.
436 */
437int vmw_overlay_pause_all(struct vmw_private *dev_priv)
438{
439 struct vmw_overlay *overlay = dev_priv->overlay_priv;
440 int i, ret;
441
442 if (!overlay)
443 return 0;
444
445 mutex_lock(&overlay->mutex);
446
447 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
448 if (overlay->stream[i].paused)
449 DRM_INFO("%s: *warning* stream %i already paused\n",
450 __func__, i);
451 ret = vmw_overlay_stop(dev_priv, i, true, false);
452 WARN_ON(ret != 0);
453 }
454
455 mutex_unlock(&overlay->mutex);
456
457 return 0;
458}
459
460int vmw_overlay_ioctl(struct drm_device *dev, void *data,
461 struct drm_file *file_priv)
462{
463 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
464 struct vmw_private *dev_priv = vmw_priv(dev);
465 struct vmw_overlay *overlay = dev_priv->overlay_priv;
466 struct drm_vmw_control_stream_arg *arg =
467 (struct drm_vmw_control_stream_arg *)data;
468 struct vmw_dma_buffer *buf;
469 struct vmw_resource *res;
470 int ret;
471
472 if (!overlay)
473 return -ENOSYS;
474
475 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
476 if (ret)
477 return ret;
478
479 mutex_lock(&overlay->mutex);
480
481 if (!arg->enabled) {
482 ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
483 goto out_unlock;
484 }
485
486 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
487 if (ret)
488 goto out_unlock;
489
490 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
491
492 vmw_dmabuf_unreference(&buf);
493
494out_unlock:
495 mutex_unlock(&overlay->mutex);
496 vmw_resource_unreference(&res);
497
498 return ret;
499}
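/*
 * From user space this ioctl is reached through the DRM command
 * interface. A sketch, assuming libdrm's drmCommandWrite() and the
 * drm_vmw_control_stream_arg layout implied by the code above; the
 * concrete values are made up for illustration:
 *
 *	struct drm_vmw_control_stream_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.stream_id = stream_id;        (from the claim-stream ioctl)
 *	arg.enabled = 1;
 *	arg.handle = bo_handle;           (DMA buffer holding the frame)
 *	arg.width = arg.src.w = arg.dst.w = 640;
 *	arg.height = arg.src.h = arg.dst.h = 480;
 *	drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
 */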
500
501int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
502{
503 if (!dev_priv->overlay_priv)
504 return 0;
505
506 return VMW_MAX_NUM_STREAMS;
507}
508
509int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
510{
511 struct vmw_overlay *overlay = dev_priv->overlay_priv;
512 int i, k;
513
514 if (!overlay)
515 return 0;
516
517 mutex_lock(&overlay->mutex);
518
519 for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
520 if (!overlay->stream[i].claimed)
521 k++;
522
523 mutex_unlock(&overlay->mutex);
524
525 return k;
526}
527
528int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
529{
530 struct vmw_overlay *overlay = dev_priv->overlay_priv;
531 int i;
532
533 if (!overlay)
534 return -ENOSYS;
535
536 mutex_lock(&overlay->mutex);
537
538 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
539
540 if (overlay->stream[i].claimed)
541 continue;
542
543 overlay->stream[i].claimed = true;
544 *out = i;
545 mutex_unlock(&overlay->mutex);
546 return 0;
547 }
548
549 mutex_unlock(&overlay->mutex);
550 return -ESRCH;
551}
552
553int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
554{
555 struct vmw_overlay *overlay = dev_priv->overlay_priv;
556
557 BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
558
559 if (!overlay)
560 return -ENOSYS;
561
562 mutex_lock(&overlay->mutex);
563
564 WARN_ON(!overlay->stream[stream_id].claimed);
565 vmw_overlay_stop(dev_priv, stream_id, false, false);
566 overlay->stream[stream_id].claimed = false;
567
568 mutex_unlock(&overlay->mutex);
569 return 0;
570}
571
572int vmw_overlay_init(struct vmw_private *dev_priv)
573{
574 struct vmw_overlay *overlay;
575 int i;
576
577 if (dev_priv->overlay_priv)
578 return -EINVAL;
579
580 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
581     !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
582 DRM_INFO("hardware doesn't support overlays\n");
583 return -ENOSYS;
584 }
585
586 overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
587 if (!overlay)
588 return -ENOMEM;
589
590 memset(overlay, 0, sizeof(*overlay));
591 mutex_init(&overlay->mutex);
592 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
593 overlay->stream[i].buf = NULL;
594 overlay->stream[i].paused = false;
595 overlay->stream[i].claimed = false;
596 }
597
598 dev_priv->overlay_priv = overlay;
599
600 return 0;
601}
602
603int vmw_overlay_close(struct vmw_private *dev_priv)
604{
605 struct vmw_overlay *overlay = dev_priv->overlay_priv;
606 bool forgotten_buffer = false;
607 int i;
608
609 if (!overlay)
610 return -ENOSYS;
611
612 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
613 if (overlay->stream[i].buf) {
614 forgotten_buffer = true;
615 vmw_overlay_stop(dev_priv, i, false, false);
616 }
617 }
618
619 WARN_ON(forgotten_buffer);
620
621 dev_priv->overlay_priv = NULL;
622 kfree(overlay);
623
624 return 0;
625}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 000000000000..9d0dd3a342eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * This file contains virtual hardware defines for kernel space.
30 */
31
32#ifndef _VMWGFX_REG_H_
33#define _VMWGFX_REG_H_
34
35#include <linux/types.h>
36
37#define VMWGFX_INDEX_PORT 0x0
38#define VMWGFX_VALUE_PORT 0x1
39#define VMWGFX_IRQSTATUS_PORT 0x8
40
41struct svga_guest_mem_descriptor {
42 __le32 ppn;
43 __le32 num_pages;
44};
45
46struct svga_fifo_cmd_fence {
47 __le32 fence;
48};
49
50#define SVGA_SYNC_GENERIC 1
51#define SVGA_SYNC_FIFOFULL 2
52
53#include "svga_types.h"
54
55#include "svga3d_reg.h"
56
57#endif
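The port defines above are offsets from the device's base I/O port. A
sketch of the indexed register access they imply, with io_start standing
in for the base port the driver reads from the PCI BAR (an assumption,
matching how the vmwgfx_drv.h accessors in this patch use these defines):

	outl(reg, io_start + VMWGFX_INDEX_PORT);	/* select a register */
	outl(value, io_start + VMWGFX_VALUE_PORT);	/* write its value */
	value = inl(io_start + VMWGFX_VALUE_PORT);	/* or read it back */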
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 000000000000..f8fbbc67a406
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1187 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30#include "ttm/ttm_object.h"
31#include "ttm/ttm_placement.h"
32#include "drmP.h"
33
34#define VMW_RES_CONTEXT ttm_driver_type0
35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2
37
38struct vmw_user_context {
39 struct ttm_base_object base;
40 struct vmw_resource res;
41};
42
43struct vmw_user_surface {
44 struct ttm_base_object base;
45 struct vmw_surface srf;
46};
47
48struct vmw_user_dma_buffer {
49 struct ttm_base_object base;
50 struct vmw_dma_buffer dma;
51};
52
53struct vmw_bo_user_rep {
54 uint32_t handle;
55 uint64_t map_handle;
56};
57
58struct vmw_stream {
59 struct vmw_resource res;
60 uint32_t stream_id;
61};
62
63struct vmw_user_stream {
64 struct ttm_base_object base;
65 struct vmw_stream stream;
66};
67
68static inline struct vmw_dma_buffer *
69vmw_dma_buffer(struct ttm_buffer_object *bo)
70{
71 return container_of(bo, struct vmw_dma_buffer, base);
72}
73
74static inline struct vmw_user_dma_buffer *
75vmw_user_dma_buffer(struct ttm_buffer_object *bo)
76{
77 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
78 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
79}
80
81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
82{
83 kref_get(&res->kref);
84 return res;
85}
86
87static void vmw_resource_release(struct kref *kref)
88{
89 struct vmw_resource *res =
90 container_of(kref, struct vmw_resource, kref);
91 struct vmw_private *dev_priv = res->dev_priv;
92
93 idr_remove(res->idr, res->id);
94 write_unlock(&dev_priv->resource_lock);
95
96 if (likely(res->hw_destroy != NULL))
97 res->hw_destroy(res);
98
99 if (res->res_free != NULL)
100 res->res_free(res);
101 else
102 kfree(res);
103
104 write_lock(&dev_priv->resource_lock);
105}
106
107void vmw_resource_unreference(struct vmw_resource **p_res)
108{
109 struct vmw_resource *res = *p_res;
110 struct vmw_private *dev_priv = res->dev_priv;
111
112 *p_res = NULL;
113 write_lock(&dev_priv->resource_lock);
114 kref_put(&res->kref, vmw_resource_release);
115 write_unlock(&dev_priv->resource_lock);
116}
117
118static int vmw_resource_init(struct vmw_private *dev_priv,
119 struct vmw_resource *res,
120 struct idr *idr,
121 enum ttm_object_type obj_type,
122 void (*res_free) (struct vmw_resource *res))
123{
124 int ret;
125
126 kref_init(&res->kref);
127 res->hw_destroy = NULL;
128 res->res_free = res_free;
129 res->res_type = obj_type;
130 res->idr = idr;
131 res->avail = false;
132 res->dev_priv = dev_priv;
133
134 do {
135 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
136 return -ENOMEM;
137
138 write_lock(&dev_priv->resource_lock);
139 ret = idr_get_new_above(idr, res, 1, &res->id);
140 write_unlock(&dev_priv->resource_lock);
141
142 } while (ret == -EAGAIN);
143
144 return ret;
145}
146
147/**
148 * vmw_resource_activate
149 *
150 * @res: Pointer to the newly created resource
151 * @hw_destroy: Destroy function. NULL if none.
152 *
153 * Activate a resource after the hardware has been made aware of it.
154 * Set the destroy function to @hw_destroy. Typically this frees the
155 * resource and destroys the hardware resources associated with it.
156 * Activate basically means that the function vmw_resource_lookup will
157 * find it.
158 */
159
160static void vmw_resource_activate(struct vmw_resource *res,
161 void (*hw_destroy) (struct vmw_resource *))
162{
163 struct vmw_private *dev_priv = res->dev_priv;
164
165 write_lock(&dev_priv->resource_lock);
166 res->avail = true;
167 res->hw_destroy = hw_destroy;
168 write_unlock(&dev_priv->resource_lock);
169}
170
171struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
172 struct idr *idr, int id)
173{
174 struct vmw_resource *res;
175
176 read_lock(&dev_priv->resource_lock);
177 res = idr_find(idr, id);
178 if (res && res->avail)
179 kref_get(&res->kref);
180 else
181 res = NULL;
182 read_unlock(&dev_priv->resource_lock);
183
184 if (unlikely(res == NULL))
185 return NULL;
186
187 return res;
188}
189
190/**
191 * Context management:
192 */
193
194static void vmw_hw_context_destroy(struct vmw_resource *res)
195{
196
197 struct vmw_private *dev_priv = res->dev_priv;
198 struct {
199 SVGA3dCmdHeader header;
200 SVGA3dCmdDestroyContext body;
201 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
202
203 if (unlikely(cmd == NULL)) {
204 DRM_ERROR("Failed reserving FIFO space for surface "
205 "destruction.\n");
206 return;
207 }
208
209 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
210 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
211 cmd->body.cid = cpu_to_le32(res->id);
212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214}
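/*
 * The reserve/fill/commit sequence above is the FIFO submission pattern
 * used throughout this file; in outline (a sketch of the contract the
 * code relies on, not additional driver code):
 *
 *	cmd = vmw_fifo_reserve(dev_priv, bytes);   returns NULL if no space
 *	if (unlikely(cmd == NULL))
 *		return;                            or wait and retry
 *	...fill in *cmd...
 *	vmw_fifo_commit(dev_priv, bytes);          makes the command visible
 */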
215
216static int vmw_context_init(struct vmw_private *dev_priv,
217 struct vmw_resource *res,
218 void (*res_free) (struct vmw_resource *res))
219{
220 int ret;
221
222 struct {
223 SVGA3dCmdHeader header;
224 SVGA3dCmdDefineContext body;
225 } *cmd;
226
227 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
228 VMW_RES_CONTEXT, res_free);
229
230 if (unlikely(ret != 0)) {
231 if (res_free == NULL)
232 kfree(res);
233 else
234 res_free(res);
235 return ret;
236 }
237
238 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
239 if (unlikely(cmd == NULL)) {
240 DRM_ERROR("Fifo reserve failed.\n");
241 vmw_resource_unreference(&res);
242 return -ENOMEM;
243 }
244
245 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
246 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
247 cmd->body.cid = cpu_to_le32(res->id);
248
249 vmw_fifo_commit(dev_priv, sizeof(*cmd));
250 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0;
252}
253
254struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
255{
256 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
257 int ret;
258
259 if (unlikely(res == NULL))
260 return NULL;
261
262 ret = vmw_context_init(dev_priv, res, NULL);
263 return (ret == 0) ? res : NULL;
264}
265
266/**
267 * User-space context management:
268 */
269
270static void vmw_user_context_free(struct vmw_resource *res)
271{
272 struct vmw_user_context *ctx =
273 container_of(res, struct vmw_user_context, res);
274
275 kfree(ctx);
276}
277
278/**
279 * This function is called when user space has no more references on the
280 * base object. It releases the base-object's reference on the resource object.
281 */
282
283static void vmw_user_context_base_release(struct ttm_base_object **p_base)
284{
285 struct ttm_base_object *base = *p_base;
286 struct vmw_user_context *ctx =
287 container_of(base, struct vmw_user_context, base);
288 struct vmw_resource *res = &ctx->res;
289
290 *p_base = NULL;
291 vmw_resource_unreference(&res);
292}
293
294int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
295 struct drm_file *file_priv)
296{
297 struct vmw_private *dev_priv = vmw_priv(dev);
298 struct vmw_resource *res;
299 struct vmw_user_context *ctx;
300 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
301 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
302 int ret = 0;
303
304 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
305 if (unlikely(res == NULL))
306 return -EINVAL;
307
308 if (res->res_free != &vmw_user_context_free) {
309 ret = -EINVAL;
310 goto out;
311 }
312
313 ctx = container_of(res, struct vmw_user_context, res);
314 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
315 ret = -EPERM;
316 goto out;
317 }
318
319 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
320out:
321 vmw_resource_unreference(&res);
322 return ret;
323}
324
325int vmw_context_define_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv)
327{
328 struct vmw_private *dev_priv = vmw_priv(dev);
329 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
330 struct vmw_resource *res;
331 struct vmw_resource *tmp;
332 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
333 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
334 int ret;
335
336 if (unlikely(ctx == NULL))
337 return -ENOMEM;
338
339 res = &ctx->res;
340 ctx->base.shareable = false;
341 ctx->base.tfile = NULL;
342
343 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
344 if (unlikely(ret != 0))
345 return ret;
346
347 tmp = vmw_resource_reference(&ctx->res);
348 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
349 &vmw_user_context_base_release, NULL);
350
351 if (unlikely(ret != 0)) {
352 vmw_resource_unreference(&tmp);
353 goto out_err;
354 }
355
356 arg->cid = res->id;
357out_err:
358 vmw_resource_unreference(&res);
359 return ret;
360
361}
362
363int vmw_context_check(struct vmw_private *dev_priv,
364 struct ttm_object_file *tfile,
365 int id)
366{
367 struct vmw_resource *res;
368 int ret = 0;
369
370 read_lock(&dev_priv->resource_lock);
371 res = idr_find(&dev_priv->context_idr, id);
372 if (res && res->avail) {
373 struct vmw_user_context *ctx =
374 container_of(res, struct vmw_user_context, res);
375 if (ctx->base.tfile != tfile && !ctx->base.shareable)
376 ret = -EPERM;
377 } else
378 ret = -EINVAL;
379 read_unlock(&dev_priv->resource_lock);
380
381 return ret;
382}
383
384
385/**
386 * Surface management.
387 */
388
389static void vmw_hw_surface_destroy(struct vmw_resource *res)
390{
391
392 struct vmw_private *dev_priv = res->dev_priv;
393 struct {
394 SVGA3dCmdHeader header;
395 SVGA3dCmdDestroySurface body;
396 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
397
398 if (unlikely(cmd == NULL)) {
399 DRM_ERROR("Failed reserving FIFO space for surface "
400 "destruction.\n");
401 return;
402 }
403
404 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
405 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
406 cmd->body.sid = cpu_to_le32(res->id);
407
408 vmw_fifo_commit(dev_priv, sizeof(*cmd));
409}
410
411void vmw_surface_res_free(struct vmw_resource *res)
412{
413 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
414
415 kfree(srf->sizes);
416 kfree(srf->snooper.image);
417 kfree(srf);
418}
419
420int vmw_surface_init(struct vmw_private *dev_priv,
421 struct vmw_surface *srf,
422 void (*res_free) (struct vmw_resource *res))
423{
424 int ret;
425 struct {
426 SVGA3dCmdHeader header;
427 SVGA3dCmdDefineSurface body;
428 } *cmd;
429 SVGA3dSize *cmd_size;
430 struct vmw_resource *res = &srf->res;
431 struct drm_vmw_size *src_size;
432 size_t submit_size;
433 uint32_t cmd_len;
434 int i;
435
436 BUG_ON(res_free == NULL);
437 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
438 VMW_RES_SURFACE, res_free);
439
440 if (unlikely(ret != 0)) {
441 res_free(res);
442 return ret;
443 }
444
445 submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
446 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
447
448 cmd = vmw_fifo_reserve(dev_priv, submit_size);
449 if (unlikely(cmd == NULL)) {
450 DRM_ERROR("Fifo reserve failed for create surface.\n");
451 vmw_resource_unreference(&res);
452 return -ENOMEM;
453 }
454
455 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
456 cmd->header.size = cpu_to_le32(cmd_len);
457 cmd->body.sid = cpu_to_le32(res->id);
458 cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
459 cmd->body.format = cpu_to_le32(srf->format);
460 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
461 cmd->body.face[i].numMipLevels =
462 cpu_to_le32(srf->mip_levels[i]);
463 }
464
465 cmd += 1;
466 cmd_size = (SVGA3dSize *) cmd;
467 src_size = srf->sizes;
468
469 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
470 cmd_size->width = cpu_to_le32(src_size->width);
471 cmd_size->height = cpu_to_le32(src_size->height);
472 cmd_size->depth = cpu_to_le32(src_size->depth);
473 }
474
475 vmw_fifo_commit(dev_priv, submit_size);
476 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0;
478}
479
480static void vmw_user_surface_free(struct vmw_resource *res)
481{
482 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
483 struct vmw_user_surface *user_srf =
484 container_of(srf, struct vmw_user_surface, srf);
485
486 kfree(srf->sizes);
487 kfree(srf->snooper.image);
488 kfree(user_srf);
489}
490
491int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
492 struct ttm_object_file *tfile,
493 uint32_t handle, struct vmw_surface **out)
494{
495 struct vmw_resource *res;
496 struct vmw_surface *srf;
497 struct vmw_user_surface *user_srf;
498 struct ttm_base_object *base;
499 int ret = -EINVAL;
500
501 base = ttm_base_object_lookup(tfile, handle);
502 if (unlikely(base == NULL))
503 return -EINVAL;
504
505 if (unlikely(base->object_type != VMW_RES_SURFACE))
506 goto out_bad_resource;
507
508 user_srf = container_of(base, struct vmw_user_surface, base);
509 srf = &user_srf->srf;
510 res = &srf->res;
511
512 read_lock(&dev_priv->resource_lock);
513
514 if (!res->avail || res->res_free != &vmw_user_surface_free) {
515 read_unlock(&dev_priv->resource_lock);
516 goto out_bad_resource;
517 }
518
519 kref_get(&res->kref);
520 read_unlock(&dev_priv->resource_lock);
521
522 *out = srf;
523 ret = 0;
524
525out_bad_resource:
526 ttm_base_object_unref(&base);
527
528 return ret;
529}
530
531static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
532{
533 struct ttm_base_object *base = *p_base;
534 struct vmw_user_surface *user_srf =
535 container_of(base, struct vmw_user_surface, base);
536 struct vmw_resource *res = &user_srf->srf.res;
537
538 *p_base = NULL;
539 vmw_resource_unreference(&res);
540}
541
542int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
543 struct drm_file *file_priv)
544{
545 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
546 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
547
548 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
549}
550
551int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
552 struct drm_file *file_priv)
553{
554 struct vmw_private *dev_priv = vmw_priv(dev);
555 struct vmw_user_surface *user_srf =
556 kmalloc(sizeof(*user_srf), GFP_KERNEL);
557 struct vmw_surface *srf;
558 struct vmw_resource *res;
559 struct vmw_resource *tmp;
560 union drm_vmw_surface_create_arg *arg =
561 (union drm_vmw_surface_create_arg *)data;
562 struct drm_vmw_surface_create_req *req = &arg->req;
563 struct drm_vmw_surface_arg *rep = &arg->rep;
564 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
565 struct drm_vmw_size __user *user_sizes;
566 int ret;
567 int i;
568
569 if (unlikely(user_srf == NULL))
570 return -ENOMEM;
571
572 srf = &user_srf->srf;
573 res = &srf->res;
574
575 srf->flags = req->flags;
576 srf->format = req->format;
577 srf->scanout = req->scanout;
578 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
579 srf->num_sizes = 0;
580 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
581 srf->num_sizes += srf->mip_levels[i];
582
583 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
584 DRM_VMW_MAX_MIP_LEVELS) {
585 ret = -EINVAL;
586 goto out_err0;
587 }
588
589 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
590 if (unlikely(srf->sizes == NULL)) {
591 ret = -ENOMEM;
592 goto out_err0;
593 }
594
595 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
596 req->size_addr;
597
598 if (copy_from_user(srf->sizes, user_sizes,
599                    srf->num_sizes * sizeof(*srf->sizes)) != 0) {
600     ret = -EFAULT;  /* don't return the bytes-not-copied count */
601     goto out_err1;
602 }
603 if (srf->scanout &&
604 srf->num_sizes == 1 &&
605 srf->sizes[0].width == 64 &&
606 srf->sizes[0].height == 64 &&
607 srf->format == SVGA3D_A8R8G8B8) {
608
609 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
610 /* clear the image */
611 if (srf->snooper.image) {
612 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
613 } else {
614 DRM_ERROR("Failed to allocate cursor_image\n");
615 ret = -ENOMEM;
616 goto out_err1;
617 }
618 } else {
619 srf->snooper.image = NULL;
620 }
621 srf->snooper.crtc = NULL;
622
623 user_srf->base.shareable = false;
624 user_srf->base.tfile = NULL;
625
626 /**
627 * From this point, the generic resource management functions
628 * destroy the object on failure.
629 */
630
631 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
632 if (unlikely(ret != 0))
633 return ret;
634
635 tmp = vmw_resource_reference(&srf->res);
636 ret = ttm_base_object_init(tfile, &user_srf->base,
637 req->shareable, VMW_RES_SURFACE,
638 &vmw_user_surface_base_release, NULL);
639
640 if (unlikely(ret != 0)) {
641 vmw_resource_unreference(&tmp);
642 vmw_resource_unreference(&res);
643 return ret;
644 }
645
646 rep->sid = user_srf->base.hash.key;
647 if (rep->sid == SVGA3D_INVALID_ID)
648 DRM_ERROR("Created bad Surface ID.\n");
649
650 vmw_resource_unreference(&res);
651 return 0;
652out_err1:
653 kfree(srf->sizes);
654out_err0:
655 kfree(user_srf);
656 return ret;
657}
658
659int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
660 struct drm_file *file_priv)
661{
662 union drm_vmw_surface_reference_arg *arg =
663 (union drm_vmw_surface_reference_arg *)data;
664 struct drm_vmw_surface_arg *req = &arg->req;
665 struct drm_vmw_surface_create_req *rep = &arg->rep;
666 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
667 struct vmw_surface *srf;
668 struct vmw_user_surface *user_srf;
669 struct drm_vmw_size __user *user_sizes;
670 struct ttm_base_object *base;
671 int ret = -EINVAL;
672
673 base = ttm_base_object_lookup(tfile, req->sid);
674 if (unlikely(base == NULL)) {
675 DRM_ERROR("Could not find surface to reference.\n");
676 return -EINVAL;
677 }
678
679 if (unlikely(base->object_type != VMW_RES_SURFACE))
680 goto out_bad_resource;
681
682 user_srf = container_of(base, struct vmw_user_surface, base);
683 srf = &user_srf->srf;
684
685 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
686 if (unlikely(ret != 0)) {
687 DRM_ERROR("Could not add a reference to a surface.\n");
688 goto out_no_reference;
689 }
690
691 rep->flags = srf->flags;
692 rep->format = srf->format;
693 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
694 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
695 rep->size_addr;
696
697 if (user_sizes && copy_to_user(user_sizes, srf->sizes,
698                                srf->num_sizes * sizeof(*srf->sizes))) {
699     DRM_ERROR("copy_to_user failed %p %u\n",
700               user_sizes, srf->num_sizes);
701     ret = -EFAULT;  /* don't return the bytes-not-copied count */
702 }
703out_bad_resource:
704out_no_reference:
705 ttm_base_object_unref(&base);
706
707 return ret;
708}
709
710int vmw_surface_check(struct vmw_private *dev_priv,
711 struct ttm_object_file *tfile,
712 uint32_t handle, int *id)
713{
714 struct ttm_base_object *base;
715 struct vmw_user_surface *user_srf;
716
717 int ret = -EPERM;
718
719 base = ttm_base_object_lookup(tfile, handle);
720 if (unlikely(base == NULL))
721 return -EINVAL;
722
723 if (unlikely(base->object_type != VMW_RES_SURFACE))
724 goto out_bad_surface;
725
726 user_srf = container_of(base, struct vmw_user_surface, base);
727 *id = user_srf->srf.res.id;
728 ret = 0;
729
730out_bad_surface:
731 /**
732 * FIXME: May deadlock here when called from the
733 * command parsing code.
734 */
735
736 ttm_base_object_unref(&base);
737 return ret;
738}
739
740/**
741 * Buffer management.
742 */
743
744static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
745 unsigned long num_pages)
746{
747 static size_t bo_user_size = ~0;
748
749 size_t page_array_size =
750 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
751
752 if (unlikely(bo_user_size == ~0)) {
753 bo_user_size = glob->ttm_bo_extra_size +
754 ttm_round_pot(sizeof(struct vmw_dma_buffer));
755 }
756
757 return bo_user_size + page_array_size;
758}
759
760void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
761{
762 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
763 struct ttm_bo_global *glob = bo->glob;
764 struct vmw_private *dev_priv =
765 container_of(bo->bdev, struct vmw_private, bdev);
766
767 if (vmw_bo->gmr_bound) {
768 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
769 spin_lock(&glob->lru_lock);
770 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
771 spin_unlock(&glob->lru_lock);
772 vmw_bo->gmr_bound = false;
773 }
774}
775
776void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
777{
778 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
779 struct ttm_bo_global *glob = bo->glob;
780
781 vmw_dmabuf_gmr_unbind(bo);
782 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
783 kfree(vmw_bo);
784}
785
786int vmw_dmabuf_init(struct vmw_private *dev_priv,
787 struct vmw_dma_buffer *vmw_bo,
788 size_t size, struct ttm_placement *placement,
789 bool interruptible,
790 void (*bo_free) (struct ttm_buffer_object *bo))
791{
792 struct ttm_bo_device *bdev = &dev_priv->bdev;
793 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
794 size_t acc_size;
795 int ret;
796
797 BUG_ON(!bo_free);
798
799 acc_size =
800 vmw_dmabuf_acc_size(bdev->glob,
801 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
802
803 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
804 if (unlikely(ret != 0)) {
805 /* we must free the bo here because on failure
806  * ttm_bo_init frees it as well */
807 bo_free(&vmw_bo->base);
808 return ret;
809 }
810
811 memset(vmw_bo, 0, sizeof(*vmw_bo));
812
813 INIT_LIST_HEAD(&vmw_bo->gmr_lru);
814 INIT_LIST_HEAD(&vmw_bo->validate_list);
815 vmw_bo->gmr_id = 0;
816 vmw_bo->gmr_bound = false;
817
818 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
819 ttm_bo_type_device, placement,
820 0, 0, interruptible,
821 NULL, acc_size, bo_free);
822 return ret;
823}
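/*
 * A sketch of a kernel-internal caller, using the vmw_dmabuf_bo_free()
 * destructor defined above; note that vmw_dmabuf_init() frees the
 * buffer itself on failure, so no kfree() is needed on the error path:
 *
 *	struct vmw_dma_buffer *vmw_bo;
 *
 *	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
 *	if (unlikely(vmw_bo == NULL))
 *		return -ENOMEM;
 *
 *	ret = vmw_dmabuf_init(dev_priv, vmw_bo, size,
 *			      &vmw_vram_placement, false,
 *			      &vmw_dmabuf_bo_free);
 */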
824
825static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
826{
827 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
828 struct ttm_bo_global *glob = bo->glob;
829
830 vmw_dmabuf_gmr_unbind(bo);
831 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
832 kfree(vmw_user_bo);
833}
834
835static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
836{
837 struct vmw_user_dma_buffer *vmw_user_bo;
838 struct ttm_base_object *base = *p_base;
839 struct ttm_buffer_object *bo;
840
841 *p_base = NULL;
842
843 if (unlikely(base == NULL))
844 return;
845
846 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
847 bo = &vmw_user_bo->dma.base;
848 ttm_bo_unref(&bo);
849}
850
851int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
852 struct drm_file *file_priv)
853{
854 struct vmw_private *dev_priv = vmw_priv(dev);
855 union drm_vmw_alloc_dmabuf_arg *arg =
856 (union drm_vmw_alloc_dmabuf_arg *)data;
857 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
858 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
859 struct vmw_user_dma_buffer *vmw_user_bo;
860 struct ttm_buffer_object *tmp;
861 struct vmw_master *vmaster = vmw_master(file_priv->master);
862 int ret;
863
864 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
865 if (unlikely(vmw_user_bo == NULL))
866 return -ENOMEM;
867
868 ret = ttm_read_lock(&vmaster->lock, true);
869 if (unlikely(ret != 0)) {
870 kfree(vmw_user_bo);
871 return ret;
872 }
873
874 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
875 &vmw_vram_sys_placement, true,
876 &vmw_user_dmabuf_destroy);
877 if (unlikely(ret != 0))
878     goto out_unlock; /* the bo is freed by vmw_dmabuf_init on error */
879
880 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
881 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
882 &vmw_user_bo->base,
883 false,
884 ttm_buffer_type,
885 &vmw_user_dmabuf_release, NULL);
886 if (unlikely(ret != 0)) {
887     /* drop the extra reference taken above */
888     ttm_bo_unref(&tmp);
889     goto out_unlock;
890 }
891 rep->handle = vmw_user_bo->base.hash.key;
892 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
893 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
894 rep->cur_gmr_offset = 0;
895 ttm_bo_unref(&tmp);
896out_unlock:
897 ttm_read_unlock(&vmaster->lock);
898 return ret;
899}
900
901int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file_priv)
903{
904 struct drm_vmw_unref_dmabuf_arg *arg =
905 (struct drm_vmw_unref_dmabuf_arg *)data;
906
907 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
908 arg->handle,
909 TTM_REF_USAGE);
910}
911
912uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
913 uint32_t cur_validate_node)
914{
915 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
916
917 if (likely(vmw_bo->on_validate_list))
918 return vmw_bo->cur_validate_node;
919
920 vmw_bo->cur_validate_node = cur_validate_node;
921 vmw_bo->on_validate_list = true;
922
923 return cur_validate_node;
924}
925
926void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
927{
928 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
929
930 vmw_bo->on_validate_list = false;
931}
932
933uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
934{
935 struct vmw_dma_buffer *vmw_bo;
936
937 if (bo->mem.mem_type == TTM_PL_VRAM)
938 return SVGA_GMR_FRAMEBUFFER;
939
940 vmw_bo = vmw_dma_buffer(bo);
941
942 return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
943}
944
945void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
946{
947 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
948 vmw_bo->gmr_bound = true;
949 vmw_bo->gmr_id = id;
950}
951
952int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
953 uint32_t handle, struct vmw_dma_buffer **out)
954{
955 struct vmw_user_dma_buffer *vmw_user_bo;
956 struct ttm_base_object *base;
957
958 base = ttm_base_object_lookup(tfile, handle);
959 if (unlikely(base == NULL)) {
960 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
961 (unsigned long)handle);
962 return -ESRCH;
963 }
964
965 if (unlikely(base->object_type != ttm_buffer_type)) {
966 ttm_base_object_unref(&base);
967 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
968 (unsigned long)handle);
969 return -EINVAL;
970 }
971
972 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
973 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
974 ttm_base_object_unref(&base);
975 *out = &vmw_user_bo->dma;
976
977 return 0;
978}
979
980/**
981 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
982 * when we're out of ids, causing GMR space to be allocated
983 * out of VRAM.
984 */
985
986int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
987{
988 struct ttm_bo_global *glob = dev_priv->bdev.glob;
989 int id;
990 int ret;
991
992 do {
993 if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
994 return -ENOMEM;
995
996 spin_lock(&glob->lru_lock);
997 ret = ida_get_new(&dev_priv->gmr_ida, &id);
998 spin_unlock(&glob->lru_lock);
999 } while (ret == -EAGAIN);
1000
1001 if (unlikely(ret != 0))
1002 return ret;
1003
1004 if (unlikely(id >= dev_priv->max_gmr_ids)) {
1005 spin_lock(&glob->lru_lock);
1006 ida_remove(&dev_priv->gmr_ida, id);
1007 spin_unlock(&glob->lru_lock);
1008 return -EBUSY;
1009 }
1010
1011 *p_id = (uint32_t) id;
1012 return 0;
1013}
1014
1015/*
1016 * Stream management
1017 */
1018
1019static void vmw_stream_destroy(struct vmw_resource *res)
1020{
1021 struct vmw_private *dev_priv = res->dev_priv;
1022 struct vmw_stream *stream;
1023 int ret;
1024
1025 DRM_INFO("%s: unref\n", __func__);
1026 stream = container_of(res, struct vmw_stream, res);
1027
1028 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1029 WARN_ON(ret != 0);
1030}
1031
1032static int vmw_stream_init(struct vmw_private *dev_priv,
1033 struct vmw_stream *stream,
1034 void (*res_free) (struct vmw_resource *res))
1035{
1036 struct vmw_resource *res = &stream->res;
1037 int ret;
1038
1039 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1040 VMW_RES_STREAM, res_free);
1041
1042 if (unlikely(ret != 0)) {
1043 if (res_free == NULL)
1044 kfree(stream);
1045 else
1046 res_free(&stream->res);
1047 return ret;
1048 }
1049
1050 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1051 if (ret) {
1052 vmw_resource_unreference(&res);
1053 return ret;
1054 }
1055
1056 DRM_INFO("%s: claimed\n", __func__);
1057
1058 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1059 return 0;
1060}
1061
1062/**
1063 * User-space stream management:
1064 */
1065
1066static void vmw_user_stream_free(struct vmw_resource *res)
1067{
1068 struct vmw_user_stream *stream =
1069 container_of(res, struct vmw_user_stream, stream.res);
1070
1071 kfree(stream);
1072}
1073
1074/**
1075 * This function is called when user space has no more references on the
1076 * base object. It releases the base-object's reference on the resource object.
1077 */
1078
1079static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1080{
1081 struct ttm_base_object *base = *p_base;
1082 struct vmw_user_stream *stream =
1083 container_of(base, struct vmw_user_stream, base);
1084 struct vmw_resource *res = &stream->stream.res;
1085
1086 *p_base = NULL;
1087 vmw_resource_unreference(&res);
1088}
1089
1090int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1091 struct drm_file *file_priv)
1092{
1093 struct vmw_private *dev_priv = vmw_priv(dev);
1094 struct vmw_resource *res;
1095 struct vmw_user_stream *stream;
1096 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1097 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1098 int ret = 0;
1099
1100 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1101 if (unlikely(res == NULL))
1102 return -EINVAL;
1103
1104 if (res->res_free != &vmw_user_stream_free) {
1105 ret = -EINVAL;
1106 goto out;
1107 }
1108
1109 stream = container_of(res, struct vmw_user_stream, stream.res);
1110 if (stream->base.tfile != tfile) {
1111 ret = -EINVAL;
1112 goto out;
1113 }
1114
1115 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1116out:
1117 vmw_resource_unreference(&res);
1118 return ret;
1119}
1120
1121int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1122 struct drm_file *file_priv)
1123{
1124 struct vmw_private *dev_priv = vmw_priv(dev);
1125 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1126 struct vmw_resource *res;
1127 struct vmw_resource *tmp;
1128 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1129 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1130 int ret;
1131
1132 if (unlikely(stream == NULL))
1133 return -ENOMEM;
1134
1135 res = &stream->stream.res;
1136 stream->base.shareable = false;
1137 stream->base.tfile = NULL;
1138
1139 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1140 if (unlikely(ret != 0))
1141 return ret;
1142
1143 tmp = vmw_resource_reference(res);
1144 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1145 &vmw_user_stream_base_release, NULL);
1146
1147 if (unlikely(ret != 0)) {
1148 vmw_resource_unreference(&tmp);
1149 goto out_err;
1150 }
1151
1152 arg->stream_id = res->id;
1153out_err:
1154 vmw_resource_unreference(&res);
1155 return ret;
1156}
1157
1158int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1159 struct ttm_object_file *tfile,
1160 uint32_t *inout_id, struct vmw_resource **out)
1161{
1162 struct vmw_user_stream *stream;
1163 struct vmw_resource *res;
1164 int ret;
1165
1166 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1167 if (unlikely(res == NULL))
1168 return -EINVAL;
1169
1170 if (res->res_free != &vmw_user_stream_free) {
1171 ret = -EINVAL;
1172 goto err_ref;
1173 }
1174
1175 stream = container_of(res, struct vmw_user_stream, stream.res);
1176 if (stream->base.tfile != tfile) {
1177 ret = -EPERM;
1178 goto err_ref;
1179 }
1180
1181 *inout_id = stream->stream.stream_id;
1182 *out = res;
1183 return 0;
1184err_ref:
1185 vmw_resource_unreference(&res);
1186 return ret;
1187}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 000000000000..e3df4adfb4d8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
32{
33 struct drm_file *file_priv;
34 struct vmw_private *dev_priv;
35
36 if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
37 if (vmw_fifo_mmap(filp, vma) == 0)
38 return 0;
39 return drm_mmap(filp, vma);
40 }
41
42 file_priv = (struct drm_file *)filp->private_data;
43 dev_priv = vmw_priv(file_priv->minor->dev);
44 return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
45}
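/*
 * Offsets below VMWGFX_FILE_PAGE_OFFSET select the FIFO or legacy DRM
 * maps; everything above them is handled by TTM. On the user side, the
 * map_handle returned by the dmabuf alloc ioctl is such a TTM offset;
 * a sketch of the standard mmap(2) flow (drm_fd and size assumed):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, rep.map_handle);
 */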
46
47static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
48{
49 DRM_INFO("global init.\n");
50 return ttm_mem_global_init(ref->object);
51}
52
53static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
54{
55 ttm_mem_global_release(ref->object);
56}
57
58int vmw_ttm_global_init(struct vmw_private *dev_priv)
59{
60 struct ttm_global_reference *global_ref;
61 int ret;
62
63 global_ref = &dev_priv->mem_global_ref;
64 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
65 global_ref->size = sizeof(struct ttm_mem_global);
66 global_ref->init = &vmw_ttm_mem_global_init;
67 global_ref->release = &vmw_ttm_mem_global_release;
68
69 ret = ttm_global_item_ref(global_ref);
70 if (unlikely(ret != 0)) {
71 DRM_ERROR("Failed setting up TTM memory accounting.\n");
72 return ret;
73 }
74
75 dev_priv->bo_global_ref.mem_glob =
76 dev_priv->mem_global_ref.object;
77 global_ref = &dev_priv->bo_global_ref.ref;
78 global_ref->global_type = TTM_GLOBAL_TTM_BO;
79 global_ref->size = sizeof(struct ttm_bo_global);
80 global_ref->init = &ttm_bo_global_init;
81 global_ref->release = &ttm_bo_global_release;
82 ret = ttm_global_item_ref(global_ref);
83
84 if (unlikely(ret != 0)) {
85 DRM_ERROR("Failed setting up TTM buffer objects.\n");
86 goto out_no_bo;
87 }
88
89 return 0;
90out_no_bo:
91 ttm_global_item_unref(&dev_priv->mem_global_ref);
92 return ret;
93}
94
95void vmw_ttm_global_release(struct vmw_private *dev_priv)
96{
97 ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
98 ttm_global_item_unref(&dev_priv->mem_global_ref);
99}