author     Dave Airlie <airlied@redhat.com>    2015-08-17 02:03:48 -0400
committer  Dave Airlie <airlied@redhat.com>    2015-08-17 02:03:48 -0400
commit     294947a5c7f6d228b70fcc51a89527e74a38a2c5 (patch)
tree       d0f79f3978a1a129e164ce576a771e00550fb985
parent     6406e45cc6f4976ace2b6d23b76bb5f07541e68f (diff)
parent     54fbde8a94a8a78547597215c9e4be590d075ee0 (diff)
Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
A couple of fixes from the previous pull request as well as gl3 support.
There is one drm core change: an export of a previously private function.
Take 2 implementing screen targets, this time with the fbdev code adjusted
accordingly. Also there is an implementation of register-driven command
buffers, which overrides the FIFO ring for command processing; it's needed
for our upcoming hardware revision.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux: (35 commits)
  drm/vmwgfx: Fix copyright headers
  drm/vmwgfx: Add DX query support. Various fixes.
  drm/vmwgfx: Add command parser support for a couple of DX commands
  drm/vmwgfx: Command parser fixes for DX
  drm/vmwgfx: Initial DX support
  drm/vmwgfx: Update device includes for DX device functionality
  drm: export the DRM permission check code
  drm/vmwgfx: Fix crash when unloading vmwgfx v2
  drm/vmwgfx: Fix framebuffer creation on older hardware
  drm/vmwgfx: Fixed topology boundary checking for Screen Targets
  drm/vmwgfx: Fix an uninitialized value
  drm/vmwgfx: Fix compiler warning with 32-bit dma_addr_t
  drm/vmwgfx: Kill a bunch of sparse warnings
  drm/vmwgfx: Fix kms preferred mode sorting
  drm/vmwgfx: Reinstate the legacy display system dirty callback
  drm/vmwgfx: Implement fbdev on kms v2
  drm/vmwgfx: Add a kernel interface to create a framebuffer v2
  drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING
  drm/vmwgfx: Convert screen targets to new helpers v3
  drm/vmwgfx: Convert screen objects to the new helpers
  ...
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/includeCheck.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h110
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h2071
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h457
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h1487
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h99
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h50
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1204
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h1633
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h (renamed from drivers/gpu/drm/vmwgfx/svga_escape.h)2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h (renamed from drivers/gpu/drm/vmwgfx/svga_overlay.h)10
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h (renamed from drivers/gpu/drm/vmwgfx/svga_reg.h)664
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h46
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h2627
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h912
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c1294
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h209
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c1303
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c784
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c662
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c498
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h335
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1935
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c560
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1651
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h192
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c212
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c277
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c556
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c500
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c555
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1266
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c309
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--include/drm/drmP.h1
-rw-r--r--include/uapi/drm/vmwgfx_drm.h38
56 files changed, 19174 insertions, 6171 deletions
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303fa2327..9a860ca1e9d7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data,
  * indicated permissions. If so, returns zero. Otherwise returns an
  * error code suitable for ioctl return.
  */
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_ioctl_permit);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
 	[DRM_IOCTL_NR(ioctl)] = {		\
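With drm_ioctl_permit() no longer static, a driver can apply the core
permission check to commands it dispatches outside the main ioctl table.
A minimal sketch of such a call site, assuming a hypothetical driver-private
dispatch path (the function name and usage here are illustrative, not part
of this series):

	/* Hypothetical driver-side dispatch reusing the core check. */
	static int my_dispatch(struct drm_device *dev, u32 flags,
			       struct drm_file *file_priv)
	{
		/* Same test drm_ioctl() runs before calling an ioctl func. */
		int ret = drm_ioctl_permit(flags, file_priv);

		if (ret)
			return ret;

		/* ... driver-specific command processing ... */
		return 0;
	}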
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab951f507..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
-	    vmwgfx_cmdbuf_res.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 000000000000..8cce7f15b6eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
1/*
2 * Intentionally empty file.
3 */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 000000000000..9ce2466a5d00
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
1/**********************************************************
2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_caps.h --
28 *
29 * Definitions for SVGA3D hardware capabilities. Capabilities
30 * are used to query for optional rendering features during
31 * driver initialization. The capability data is stored as very
32 * basic key/value dictionary within the "FIFO register" memory
33 * area at the beginning of BAR2.
34 *
35 * Note that these definitions are only for 3D capabilities.
36 * The SVGA device also has "device capabilities" and "FIFO
37 * capabilities", which are non-3D-specific and are stored as
38 * bitfields rather than key/value pairs.
39 */
40
41#ifndef _SVGA3D_CAPS_H_
42#define _SVGA3D_CAPS_H_
43
44#define INCLUDE_ALLOW_MODULE
45#define INCLUDE_ALLOW_USERLEVEL
46
47#include "includeCheck.h"
48
49#include "svga_reg.h"
50
51#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \
52 SVGA_FIFO_3D_CAPS + 1)
53
54
55/*
56 * SVGA3dCapsRecordType
57 *
58 * Record types that can be found in the caps block.
59 * Related record types are grouped together numerically so that
60 * SVGA3dCaps_FindRecord() can be applied on a range of record
61 * types.
62 */
63
64typedef enum {
65 SVGA3DCAPS_RECORD_UNKNOWN = 0,
66 SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
67 SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
68 SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
69} SVGA3dCapsRecordType;
70
71
72/*
73 * SVGA3dCapsRecordHeader
74 *
75 * Header field leading each caps block record. Contains the offset (in
76 * register words, NOT bytes) to the next caps block record (or the end
77 * of caps block records which will be a zero word) and the record type
78 * as defined above.
79 */
80
81typedef
82#include "vmware_pack_begin.h"
83struct SVGA3dCapsRecordHeader {
84 uint32 length;
85 SVGA3dCapsRecordType type;
86}
87#include "vmware_pack_end.h"
88SVGA3dCapsRecordHeader;
89
90
91/*
92 * SVGA3dCapsRecord
93 *
94 * Caps block record; "data" is a placeholder for the actual data structure
95 * contained within the record;
96 */
97
98typedef
99#include "vmware_pack_begin.h"
100struct SVGA3dCapsRecord {
101 SVGA3dCapsRecordHeader header;
102 uint32 data[1];
103}
104#include "vmware_pack_end.h"
105SVGA3dCapsRecord;
106
107
108typedef uint32 SVGA3dCapPair[2];
109
110#endif
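The header comment above implies a simple record walk: advance by
header.length words until a zero word terminates the list. A hedged sketch
of such a lookup, written only from the structures in this header (the
function name echoes the SVGA3dCaps_FindRecord() mentioned above but is
illustrative):

	/* Walk a snapshot of the SVGA_FIFO_3D_CAPS area ('caps' holds
	 * SVGA_FIFO_3D_CAPS_SIZE words copied from the FIFO). */
	static const SVGA3dCapsRecord *
	caps_find_record(const uint32 *caps, SVGA3dCapsRecordType type)
	{
		const uint32 *next = caps;

		while (*next != 0) {	/* zero word ends the records */
			const SVGA3dCapsRecord *rec =
				(const SVGA3dCapsRecord *)next;

			if (rec->header.type == type)
				return rec;
			next += rec->header.length;	/* words, not bytes */
		}
		return NULL;
	}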
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 000000000000..2dfd57c5f463
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_cmd.h --
28 *
29 * SVGA 3d hardware cmd definitions
30 */
31
32#ifndef _SVGA3D_CMD_H_
33#define _SVGA3D_CMD_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40#include "svga3d_types.h"
41
42/*
43 * Identifiers for commands in the command FIFO.
44 *
45 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
46 * the SVGA3D protocol and remain reserved; they should not be used in the
47 * future.
48 *
49 * IDs between 1040 and 1999 (inclusive) are available for use by the
50 * current SVGA3D protocol.
51 *
52 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
53 * and up.
54 */
55
56typedef enum {
57 SVGA_3D_CMD_LEGACY_BASE = 1000,
58 SVGA_3D_CMD_BASE = 1040,
59
60 SVGA_3D_CMD_SURFACE_DEFINE = 1040,
61 SVGA_3D_CMD_SURFACE_DESTROY = 1041,
62 SVGA_3D_CMD_SURFACE_COPY = 1042,
63 SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043,
64 SVGA_3D_CMD_SURFACE_DMA = 1044,
65 SVGA_3D_CMD_CONTEXT_DEFINE = 1045,
66 SVGA_3D_CMD_CONTEXT_DESTROY = 1046,
67 SVGA_3D_CMD_SETTRANSFORM = 1047,
68 SVGA_3D_CMD_SETZRANGE = 1048,
69 SVGA_3D_CMD_SETRENDERSTATE = 1049,
70 SVGA_3D_CMD_SETRENDERTARGET = 1050,
71 SVGA_3D_CMD_SETTEXTURESTATE = 1051,
72 SVGA_3D_CMD_SETMATERIAL = 1052,
73 SVGA_3D_CMD_SETLIGHTDATA = 1053,
74 SVGA_3D_CMD_SETLIGHTENABLED = 1054,
75 SVGA_3D_CMD_SETVIEWPORT = 1055,
76 SVGA_3D_CMD_SETCLIPPLANE = 1056,
77 SVGA_3D_CMD_CLEAR = 1057,
78 SVGA_3D_CMD_PRESENT = 1058,
79 SVGA_3D_CMD_SHADER_DEFINE = 1059,
80 SVGA_3D_CMD_SHADER_DESTROY = 1060,
81 SVGA_3D_CMD_SET_SHADER = 1061,
82 SVGA_3D_CMD_SET_SHADER_CONST = 1062,
83 SVGA_3D_CMD_DRAW_PRIMITIVES = 1063,
84 SVGA_3D_CMD_SETSCISSORRECT = 1064,
85 SVGA_3D_CMD_BEGIN_QUERY = 1065,
86 SVGA_3D_CMD_END_QUERY = 1066,
87 SVGA_3D_CMD_WAIT_FOR_QUERY = 1067,
88 SVGA_3D_CMD_PRESENT_READBACK = 1068,
89 SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
90 SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
91 SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
92 SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
93 SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
94 SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
95 SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
96 SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
97 SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
98 SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
99 SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
100 SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
101 SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
102 SVGA_3D_CMD_SCREEN_DMA = 1082,
103 SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
104 SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
105
106 SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
107 SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
108 SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
109 SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
110 SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
111 SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
112
113 SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
114 SVGA_3D_CMD_READBACK_OTABLE = 1092,
115
116 SVGA_3D_CMD_DEFINE_GB_MOB = 1093,
117 SVGA_3D_CMD_DESTROY_GB_MOB = 1094,
118 SVGA_3D_CMD_DEAD3 = 1095,
119 SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096,
120
121 SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097,
122 SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098,
123 SVGA_3D_CMD_BIND_GB_SURFACE = 1099,
124 SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100,
125 SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101,
126 SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102,
127 SVGA_3D_CMD_READBACK_GB_IMAGE = 1103,
128 SVGA_3D_CMD_READBACK_GB_SURFACE = 1104,
129 SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105,
130 SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106,
131
132 SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107,
133 SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108,
134 SVGA_3D_CMD_BIND_GB_CONTEXT = 1109,
135 SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110,
136 SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111,
137
138 SVGA_3D_CMD_DEFINE_GB_SHADER = 1112,
139 SVGA_3D_CMD_DESTROY_GB_SHADER = 1113,
140 SVGA_3D_CMD_BIND_GB_SHADER = 1114,
141
142 SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115,
143
144 SVGA_3D_CMD_BEGIN_GB_QUERY = 1116,
145 SVGA_3D_CMD_END_GB_QUERY = 1117,
146 SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118,
147
148 SVGA_3D_CMD_NOP = 1119,
149
150 SVGA_3D_CMD_ENABLE_GART = 1120,
151 SVGA_3D_CMD_DISABLE_GART = 1121,
152 SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122,
153 SVGA_3D_CMD_UNMAP_GART_RANGE = 1123,
154
155 SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124,
156 SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125,
157 SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126,
158 SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127,
159
160 SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128,
161 SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129,
162
163 SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130,
164
165 SVGA_3D_CMD_GB_SCREEN_DMA = 1131,
166 SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132,
167 SVGA_3D_CMD_GB_MOB_FENCE = 1133,
168 SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134,
169 SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135,
170 SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136,
171 SVGA_3D_CMD_NOP_ERROR = 1137,
172
173 SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138,
174 SVGA_3D_CMD_SET_VERTEX_DECLS = 1139,
175 SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140,
176 SVGA_3D_CMD_DRAW = 1141,
177 SVGA_3D_CMD_DRAW_INDEXED = 1142,
178
179 /*
180 * DX10 Commands
181 */
182 SVGA_3D_CMD_DX_MIN = 1143,
183 SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143,
184 SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144,
185 SVGA_3D_CMD_DX_BIND_CONTEXT = 1145,
186 SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146,
187 SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147,
188 SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148,
189 SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149,
190 SVGA_3D_CMD_DX_SET_SHADER = 1150,
191 SVGA_3D_CMD_DX_SET_SAMPLERS = 1151,
192 SVGA_3D_CMD_DX_DRAW = 1152,
193 SVGA_3D_CMD_DX_DRAW_INDEXED = 1153,
194 SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154,
195 SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155,
196 SVGA_3D_CMD_DX_DRAW_AUTO = 1156,
197 SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157,
198 SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158,
199 SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159,
200 SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160,
201 SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161,
202 SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162,
203 SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163,
204 SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164,
205 SVGA_3D_CMD_DX_DEFINE_QUERY = 1165,
206 SVGA_3D_CMD_DX_DESTROY_QUERY = 1166,
207 SVGA_3D_CMD_DX_BIND_QUERY = 1167,
208 SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168,
209 SVGA_3D_CMD_DX_BEGIN_QUERY = 1169,
210 SVGA_3D_CMD_DX_END_QUERY = 1170,
211 SVGA_3D_CMD_DX_READBACK_QUERY = 1171,
212 SVGA_3D_CMD_DX_SET_PREDICATION = 1172,
213 SVGA_3D_CMD_DX_SET_SOTARGETS = 1173,
214 SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174,
215 SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175,
216 SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176,
217 SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
218 SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
219 SVGA_3D_CMD_DX_PRED_COPY = 1179,
220 SVGA_3D_CMD_DX_STRETCHBLT = 1180,
221 SVGA_3D_CMD_DX_GENMIPS = 1181,
222 SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
223 SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
224 SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184,
225 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185,
226 SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186,
227 SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187,
228 SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188,
229 SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189,
230 SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190,
231 SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191,
232 SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192,
233 SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193,
234 SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194,
235 SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195,
236 SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196,
237 SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197,
238 SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198,
239 SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199,
240 SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200,
241 SVGA_3D_CMD_DX_DEFINE_SHADER = 1201,
242 SVGA_3D_CMD_DX_DESTROY_SHADER = 1202,
243 SVGA_3D_CMD_DX_BIND_SHADER = 1203,
244 SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204,
245 SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205,
246 SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206,
247 SVGA_3D_CMD_DX_SET_COTABLE = 1207,
248 SVGA_3D_CMD_DX_READBACK_COTABLE = 1208,
249 SVGA_3D_CMD_DX_BUFFER_COPY = 1209,
250 SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210,
251 SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211,
252 SVGA_3D_CMD_DX_MOVE_QUERY = 1212,
253 SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213,
254 SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
255 SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
256 SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
257 SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
258 SVGA_3D_CMD_DX_HINT = 1218,
259 SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
260 SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
261 SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
262 SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
263
264 /*
265 * Reserve some IDs to be used for the DX11 shader types.
266 */
267 SVGA_3D_CMD_DX_RESERVED1 = 1223,
268 SVGA_3D_CMD_DX_RESERVED2 = 1224,
269 SVGA_3D_CMD_DX_RESERVED3 = 1225,
270
271 SVGA_3D_CMD_DX_MAX = 1226,
272 SVGA_3D_CMD_MAX = 1226,
273 SVGA_3D_CMD_FUTURE_MAX = 3000
274} SVGAFifo3dCmdId;
275
276/*
277 * FIFO command format definitions:
278 */
279
280/*
281 * The data size header following cmdNum for every 3d command
282 */
283typedef
284#include "vmware_pack_begin.h"
285struct {
286 uint32 id;
287 uint32 size;
288}
289#include "vmware_pack_end.h"
290SVGA3dCmdHeader;
291
292typedef
293#include "vmware_pack_begin.h"
294struct {
295 uint32 numMipLevels;
296}
297#include "vmware_pack_end.h"
298SVGA3dSurfaceFace;
299
300typedef
301#include "vmware_pack_begin.h"
302struct {
303 uint32 sid;
304 SVGA3dSurfaceFlags surfaceFlags;
305 SVGA3dSurfaceFormat format;
306 /*
307 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
308 * structures must have the same value of numMipLevels field.
309 * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
310 * numMipLevels set to 0.
311 */
312 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
313 /*
314 * Followed by an SVGA3dSize structure for each mip level in each face.
315 *
316 * A note on surface sizes: Sizes are always specified in pixels,
317 * even if the true surface size is not a multiple of the minimum
318 * block size of the surface's format. For example, a 3x3x1 DXT1
319 * compressed texture would actually be stored as a 4x4x1 image in
320 * memory.
321 */
322}
323#include "vmware_pack_end.h"
324SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
325
326typedef
327#include "vmware_pack_begin.h"
328struct {
329 uint32 sid;
330 SVGA3dSurfaceFlags surfaceFlags;
331 SVGA3dSurfaceFormat format;
332 /*
333 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
334 * structures must have the same value of numMipLevels field.
335 * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
336 * numMipLevels set to 0.
337 */
338 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
339 uint32 multisampleCount;
340 SVGA3dTextureFilter autogenFilter;
341 /*
342 * Followed by an SVGA3dSize structure for each mip level in each face.
343 *
344 * A note on surface sizes: Sizes are always specified in pixels,
345 * even if the true surface size is not a multiple of the minimum
346 * block size of the surface's format. For example, a 3x3x1 DXT1
347 * compressed texture would actually be stored as a 4x4x1 image in
348 * memory.
349 */
350}
351#include "vmware_pack_end.h"
352SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
353
354typedef
355#include "vmware_pack_begin.h"
356struct {
357 uint32 sid;
358}
359#include "vmware_pack_end.h"
360SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
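Every FIFO packet pairs the SVGA3dCmdHeader defined above with the command
body, and header.size counts only the body bytes. A minimal sketch of
encoding the destroy command above into reserved command memory ('buf' is
an illustrative stand-in for FIFO or command-buffer space):

	static void encode_destroy_surface(void *buf, uint32 sid)
	{
		SVGA3dCmdHeader *hdr = buf;
		SVGA3dCmdDestroySurface *body =
			(SVGA3dCmdDestroySurface *)(hdr + 1);

		hdr->id = SVGA_3D_CMD_SURFACE_DESTROY;
		hdr->size = sizeof(*body);	/* body only, header excluded */
		body->sid = sid;
	}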
361
362typedef
363#include "vmware_pack_begin.h"
364struct {
365 uint32 cid;
366}
367#include "vmware_pack_end.h"
368SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
369
370typedef
371#include "vmware_pack_begin.h"
372struct {
373 uint32 cid;
374}
375#include "vmware_pack_end.h"
376SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
377
378typedef
379#include "vmware_pack_begin.h"
380struct {
381 uint32 cid;
382 SVGA3dClearFlag clearFlag;
383 uint32 color;
384 float depth;
385 uint32 stencil;
386 /* Followed by variable number of SVGA3dRect structures */
387}
388#include "vmware_pack_end.h"
389SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
390
391typedef
392#include "vmware_pack_begin.h"
393struct {
394 SVGA3dLightType type;
395 SVGA3dBool inWorldSpace;
396 float diffuse[4];
397 float specular[4];
398 float ambient[4];
399 float position[4];
400 float direction[4];
401 float range;
402 float falloff;
403 float attenuation0;
404 float attenuation1;
405 float attenuation2;
406 float theta;
407 float phi;
408}
409#include "vmware_pack_end.h"
410SVGA3dLightData;
411
412typedef
413#include "vmware_pack_begin.h"
414struct {
415 uint32 sid;
416 /* Followed by variable number of SVGA3dCopyRect structures */
417}
418#include "vmware_pack_end.h"
419SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
420
421typedef
422#include "vmware_pack_begin.h"
423struct {
424 SVGA3dRenderStateName state;
425 union {
426 uint32 uintValue;
427 float floatValue;
428 };
429}
430#include "vmware_pack_end.h"
431SVGA3dRenderState;
432
433typedef
434#include "vmware_pack_begin.h"
435struct {
436 uint32 cid;
437 /* Followed by variable number of SVGA3dRenderState structures */
438}
439#include "vmware_pack_end.h"
440SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
441
442typedef
443#include "vmware_pack_begin.h"
444struct {
445 uint32 cid;
446 SVGA3dRenderTargetType type;
447 SVGA3dSurfaceImageId target;
448}
449#include "vmware_pack_end.h"
450SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
451
452typedef
453#include "vmware_pack_begin.h"
454struct {
455 SVGA3dSurfaceImageId src;
456 SVGA3dSurfaceImageId dest;
457 /* Followed by variable number of SVGA3dCopyBox structures */
458}
459#include "vmware_pack_end.h"
460SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
461
462typedef
463#include "vmware_pack_begin.h"
464struct {
465 SVGA3dSurfaceImageId src;
466 SVGA3dSurfaceImageId dest;
467 SVGA3dBox boxSrc;
468 SVGA3dBox boxDest;
469 SVGA3dStretchBltMode mode;
470}
471#include "vmware_pack_end.h"
472SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
473
474typedef
475#include "vmware_pack_begin.h"
476struct {
477 /*
478 * If the discard flag is present in a surface DMA operation, the host may
479 * discard the contents of the current mipmap level and face of the target
480 * surface before applying the surface DMA contents.
481 */
482 uint32 discard : 1;
483
484 /*
485 * If the unsynchronized flag is present, the host may perform this upload
486 * without syncing to pending reads on this surface.
487 */
488 uint32 unsynchronized : 1;
489
490 /*
491 * Guests *MUST* set the reserved bits to 0 before submitting the command
492 * suffix as future flags may occupy these bits.
493 */
494 uint32 reserved : 30;
495}
496#include "vmware_pack_end.h"
497SVGA3dSurfaceDMAFlags;
498
499typedef
500#include "vmware_pack_begin.h"
501struct {
502 SVGAGuestImage guest;
503 SVGA3dSurfaceImageId host;
504 SVGA3dTransferType transfer;
505 /*
506 * Followed by variable number of SVGA3dCopyBox structures. For consistency
507 * in all clipping logic and coordinate translation, we define the
508 * "source" in each copyBox as the guest image and the
509 * "destination" as the host image, regardless of transfer
510 * direction.
511 *
512 * For efficiency, the SVGA3D device is free to copy more data than
513 * specified. For example, it may round copy boxes outwards such
514 * that they lie on particular alignment boundaries.
515 */
516}
517#include "vmware_pack_end.h"
518SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
519
520/*
521 * SVGA3dCmdSurfaceDMASuffix --
522 *
523 * This is a command suffix that will appear after a SurfaceDMA command in
524 * the FIFO. It contains some extra information that hosts may use to
525 * optimize performance or protect the guest. This suffix exists to preserve
526 * backwards compatibility while also allowing for new functionality to be
527 * implemented.
528 */
529
530typedef
531#include "vmware_pack_begin.h"
532struct {
533 uint32 suffixSize;
534
535 /*
536 * The maximum offset is used to determine the maximum offset from the
537 * guestPtr base address that will be accessed or written to during this
538 * surfaceDMA. If the suffix is supported, the host will respect this
539 * boundary while performing surface DMAs.
540 *
541 * Defaults to MAX_UINT32
542 */
543 uint32 maximumOffset;
544
545 /*
546 * A set of flags that describes optimizations that the host may perform
547 * while performing this surface DMA operation. For correctness, the guest
548 * should never rely on behaviour that differs when these flags are set.
549 *
550 * Defaults to 0
551 */
552 SVGA3dSurfaceDMAFlags flags;
553}
554#include "vmware_pack_end.h"
555SVGA3dCmdSurfaceDMASuffix;
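Taken together, a SURFACE_DMA packet is the fixed SVGA3dCmdSurfaceDMA body,
then the caller's SVGA3dCopyBox list, then the suffix above, and the command
header's size field must cover all three. A hedged computation of that size
(numBoxes is assumed supplied by the caller):

	/* Illustrative body size for SVGA_3D_CMD_SURFACE_DMA. */
	static uint32 surface_dma_body_size(uint32 numBoxes)
	{
		return sizeof(SVGA3dCmdSurfaceDMA) +
		       numBoxes * sizeof(SVGA3dCopyBox) +
		       sizeof(SVGA3dCmdSurfaceDMASuffix);
	}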
556
557/*
558 * SVGA_3D_CMD_DRAW_PRIMITIVES --
559 *
560 * This command is the SVGA3D device's generic drawing entry point.
561 * It can draw multiple ranges of primitives, optionally using an
562 * index buffer, using an arbitrary collection of vertex buffers.
563 *
564 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
565 * during this draw call. The declarations specify which surface
566 * the vertex data lives in, what that vertex data is used for,
567 * and how to interpret it.
568 *
569 * Each SVGA3dPrimitiveRange defines a collection of primitives
570 * to render using the same vertex arrays. An index buffer is
571 * optional.
572 */
573
574typedef
575#include "vmware_pack_begin.h"
576struct {
577 /*
578 * A range hint is an optional specification for the range of indices
579 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
580 * that the entire array will be used.
581 *
582 * These are only hints. The SVGA3D device may use them for
583 * performance optimization if possible, but it's also allowed to
584 * ignore these values.
585 */
586 uint32 first;
587 uint32 last;
588}
589#include "vmware_pack_end.h"
590SVGA3dArrayRangeHint;
591
592typedef
593#include "vmware_pack_begin.h"
594struct {
595 /*
596 * Define the origin and shape of a vertex or index array. Both
597 * 'offset' and 'stride' are in bytes. The provided surface will be
598 * reinterpreted as a flat array of bytes in the same format used
599 * by surface DMA operations. To avoid unnecessary conversions, the
600 * surface should be created with the SVGA3D_BUFFER format.
601 *
602 * Index 0 in the array starts 'offset' bytes into the surface.
603 * Index 1 begins at byte 'offset + stride', etc. Array indices may
604 * not be negative.
605 */
606 uint32 surfaceId;
607 uint32 offset;
608 uint32 stride;
609}
610#include "vmware_pack_end.h"
611SVGA3dArray;
612
613typedef
614#include "vmware_pack_begin.h"
615struct {
616 /*
617 * Describe a vertex array's data type, and define how it is to be
618 * used by the fixed function pipeline or the vertex shader. It
619 * isn't useful to have two VertexDecls with the same
620 * VertexArrayIdentity in one draw call.
621 */
622 SVGA3dDeclType type;
623 SVGA3dDeclMethod method;
624 SVGA3dDeclUsage usage;
625 uint32 usageIndex;
626}
627#include "vmware_pack_end.h"
628SVGA3dVertexArrayIdentity;
629
630typedef
631#include "vmware_pack_begin.h"
632struct SVGA3dVertexDecl {
633 SVGA3dVertexArrayIdentity identity;
634 SVGA3dArray array;
635 SVGA3dArrayRangeHint rangeHint;
636}
637#include "vmware_pack_end.h"
638SVGA3dVertexDecl;
639
640typedef
641#include "vmware_pack_begin.h"
642struct SVGA3dPrimitiveRange {
643 /*
644 * Define a group of primitives to render, from sequential indices.
645 *
646 * The value of 'primitiveType' and 'primitiveCount' imply the
647 * total number of vertices that will be rendered.
648 */
649 SVGA3dPrimitiveType primType;
650 uint32 primitiveCount;
651
652 /*
653 * Optional index buffer. If indexArray.surfaceId is
654 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
655 * without an index buffer is identical to rendering with an index
656 * buffer containing the sequence [0, 1, 2, 3, ...].
657 *
658 * If an index buffer is in use, indexWidth specifies the width in
659 * bytes of each index value. It must be less than or equal to
660 * indexArray.stride.
661 *
662 * (Currently, the SVGA3D device requires index buffers to be tightly
663 * packed. In other words, indexWidth == indexArray.stride)
664 */
665 SVGA3dArray indexArray;
666 uint32 indexWidth;
667
668 /*
669 * Optional index bias. This number is added to all indices from
670 * indexArray before they are used as vertex array indices. This
671 * can be used in multiple ways:
672 *
673 * - When not using an indexArray, this bias can be used to
674 * specify where in the vertex arrays to begin rendering.
675 *
676 * - A positive number here is equivalent to increasing the
677 * offset in each vertex array.
678 *
679 * - A negative number can be used to render using a small
680 * vertex array and an index buffer that contains large
681 * values. This may be used by some applications that
682 * crop a vertex buffer without modifying their index
683 * buffer.
684 *
685 * Note that rendering with a negative bias value may be slower and
686 * use more memory than rendering with a positive or zero bias.
687 */
688 int32 indexBias;
689}
690#include "vmware_pack_end.h"
691SVGA3dPrimitiveRange;
692
693typedef
694#include "vmware_pack_begin.h"
695struct {
696 uint32 cid;
697 uint32 numVertexDecls;
698 uint32 numRanges;
699
700 /*
701 * There are two variable size arrays after the
702 * SVGA3dCmdDrawPrimitives structure. In order,
703 * they are:
704 *
705 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
706 * SVGA3D_MAX_VERTEX_ARRAYS;
707 * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
708 * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
709 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
710 * the frequency divisor for the corresponding vertex decl).
711 */
712}
713#include "vmware_pack_end.h"
714SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
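The draw packet is thus variable-length: the fixed body above, then
numVertexDecls SVGA3dVertexDecl entries, then numRanges SVGA3dPrimitiveRange
entries, plus the optional divisor array. A sketch of the body-size
computation under that layout (divisors omitted):

	static uint32 draw_primitives_body_size(uint32 numVertexDecls,
						uint32 numRanges)
	{
		return sizeof(SVGA3dCmdDrawPrimitives) +
		       numVertexDecls * sizeof(SVGA3dVertexDecl) +
		       numRanges * sizeof(SVGA3dPrimitiveRange);
	}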
715
716typedef
717#include "vmware_pack_begin.h"
718struct {
719 uint32 cid;
720
721 uint32 primitiveCount; /* How many primitives to render */
722 uint32 startVertexLocation; /* Which vertex do we start rendering at. */
723
724 uint8 primitiveType; /* SVGA3dPrimitiveType */
725 uint8 padding[3];
726}
727#include "vmware_pack_end.h"
728SVGA3dCmdDraw;
729
730typedef
731#include "vmware_pack_begin.h"
732struct {
733 uint32 cid;
734
735 uint8 primitiveType; /* SVGA3dPrimitiveType */
736
737 uint32 indexBufferSid; /* Valid index buffer sid. */
738 uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */
739 /* always 0 for DX9 guests, non-zero for OpenGL */
740 /* guests. We can't represent non-multiple of */
741 /* stride offsets in D3D9Renderer... */
742 uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
743
744 int32 baseVertexLocation; /* Bias applied to the index when selecting a */
745 /* vertex from the streams, may be negative */
746
747 uint32 primitiveCount; /* How many primitives to render */
748 uint32 pad0;
749 uint16 pad1;
750}
751#include "vmware_pack_end.h"
752SVGA3dCmdDrawIndexed;
753
754typedef
755#include "vmware_pack_begin.h"
756struct {
757 /*
758 * Describe a vertex array's data type, and define how it is to be
759 * used by the fixed function pipeline or the vertex shader. It
760 * isn't useful to have two VertexDecls with the same
761 * VertexArrayIdentity in one draw call.
762 */
763 uint16 streamOffset;
764 uint8 stream;
765 uint8 type; /* SVGA3dDeclType */
766 uint8 method; /* SVGA3dDeclMethod */
767 uint8 usage; /* SVGA3dDeclUsage */
768 uint8 usageIndex;
769 uint8 padding;
770
771}
772#include "vmware_pack_end.h"
773SVGA3dVertexElement;
774
775typedef
776#include "vmware_pack_begin.h"
777struct {
778 uint32 cid;
779
780 uint32 numElements;
781
782 /*
783 * Followed by numElements SVGA3dVertexElement structures.
784 *
785 * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
786 * are cleared and will not be used by following draws.
787 */
788}
789#include "vmware_pack_end.h"
790SVGA3dCmdSetVertexDecls;
791
792typedef
793#include "vmware_pack_begin.h"
794struct {
795 uint32 sid;
796 uint32 stride;
797 uint32 offset;
798}
799#include "vmware_pack_end.h"
800SVGA3dVertexStream;
801
802typedef
803#include "vmware_pack_begin.h"
804struct {
805 uint32 cid;
806
807 uint32 numStreams;
808 /*
809 * Followed by numStreams SVGA3dVertexStream structures.
810 *
811 * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
812 * are cleared and will not be used by following draws.
813 */
814}
815#include "vmware_pack_end.h"
816SVGA3dCmdSetVertexStreams;
817
818typedef
819#include "vmware_pack_begin.h"
820struct {
821 uint32 cid;
822 uint32 numDivisors;
823}
824#include "vmware_pack_end.h"
825SVGA3dCmdSetVertexDivisors;
826
827typedef
828#include "vmware_pack_begin.h"
829struct {
830 uint32 stage;
831 SVGA3dTextureStateName name;
832 union {
833 uint32 value;
834 float floatValue;
835 };
836}
837#include "vmware_pack_end.h"
838SVGA3dTextureState;
839
840typedef
841#include "vmware_pack_begin.h"
842struct {
843 uint32 cid;
844 /* Followed by variable number of SVGA3dTextureState structures */
845}
846#include "vmware_pack_end.h"
847SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
848
849typedef
850#include "vmware_pack_begin.h"
851struct {
852 uint32 cid;
853 SVGA3dTransformType type;
854 float matrix[16];
855}
856#include "vmware_pack_end.h"
857SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
858
859typedef
860#include "vmware_pack_begin.h"
861struct {
862 float min;
863 float max;
864}
865#include "vmware_pack_end.h"
866SVGA3dZRange;
867
868typedef
869#include "vmware_pack_begin.h"
870struct {
871 uint32 cid;
872 SVGA3dZRange zRange;
873}
874#include "vmware_pack_end.h"
875SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
876
877typedef
878#include "vmware_pack_begin.h"
879struct {
880 float diffuse[4];
881 float ambient[4];
882 float specular[4];
883 float emissive[4];
884 float shininess;
885}
886#include "vmware_pack_end.h"
887SVGA3dMaterial;
888
889typedef
890#include "vmware_pack_begin.h"
891struct {
892 uint32 cid;
893 SVGA3dFace face;
894 SVGA3dMaterial material;
895}
896#include "vmware_pack_end.h"
897SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
898
899typedef
900#include "vmware_pack_begin.h"
901struct {
902 uint32 cid;
903 uint32 index;
904 SVGA3dLightData data;
905}
906#include "vmware_pack_end.h"
907SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
908
909typedef
910#include "vmware_pack_begin.h"
911struct {
912 uint32 cid;
913 uint32 index;
914 uint32 enabled;
915}
916#include "vmware_pack_end.h"
917SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
918
919typedef
920#include "vmware_pack_begin.h"
921struct {
922 uint32 cid;
923 SVGA3dRect rect;
924}
925#include "vmware_pack_end.h"
926SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
927
928typedef
929#include "vmware_pack_begin.h"
930struct {
931 uint32 cid;
932 SVGA3dRect rect;
933}
934#include "vmware_pack_end.h"
935SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
936
937typedef
938#include "vmware_pack_begin.h"
939struct {
940 uint32 cid;
941 uint32 index;
942 float plane[4];
943}
944#include "vmware_pack_end.h"
945SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
946
947typedef
948#include "vmware_pack_begin.h"
949struct {
950 uint32 cid;
951 uint32 shid;
952 SVGA3dShaderType type;
953 /* Followed by variable number of DWORDs for shader bytecode */
954}
955#include "vmware_pack_end.h"
956SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
957
958typedef
959#include "vmware_pack_begin.h"
960struct {
961 uint32 cid;
962 uint32 shid;
963 SVGA3dShaderType type;
964}
965#include "vmware_pack_end.h"
966SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
967
968typedef
969#include "vmware_pack_begin.h"
970struct {
971 uint32 cid;
972 uint32 reg; /* register number */
973 SVGA3dShaderType type;
974 SVGA3dShaderConstType ctype;
975 uint32 values[4];
976
977 /*
978 * Followed by a variable number of additional values.
979 */
980}
981#include "vmware_pack_end.h"
982SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
983
984typedef
985#include "vmware_pack_begin.h"
986struct {
987 uint32 cid;
988 SVGA3dShaderType type;
989 uint32 shid;
990}
991#include "vmware_pack_end.h"
992SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
993
994typedef
995#include "vmware_pack_begin.h"
996struct {
997 uint32 cid;
998 SVGA3dQueryType type;
999}
1000#include "vmware_pack_end.h"
1001SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1002
1003typedef
1004#include "vmware_pack_begin.h"
1005struct {
1006 uint32 cid;
1007 SVGA3dQueryType type;
1008 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1009}
1010#include "vmware_pack_end.h"
1011SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1012
1013
1014/*
1015 * SVGA3D_CMD_WAIT_FOR_QUERY --
1016 *
1017 * Will read the SVGA3dQueryResult structure pointed to by guestResult,
1018 * and if the state member is set to anything other than
1019 * SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op.
1020 *
1021 * Otherwise, in addition to the query explicitly waited for,
1022 * all queries of the same type issued with the same cid, for which
1023 * an SVGA_3D_CMD_END_QUERY command has previously been sent, will
1024 * be finished after execution of this command.
1025 *
1026 * A query will be identified by the gmrId and offset of the guestResult
1027 * member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
1028 * been sent previously with an identical gmrId and offset, it will
1029 * effectively end all queries with an identical type issued with the
1030 * same cid, and the SVGA3dQueryResult structure pointed to by
1031 * guestResult will not be written to. This property can be used to
1032 * implement a query barrier for a given cid and query type.
1033 */
1034
1035typedef
1036#include "vmware_pack_begin.h"
1037struct {
1038 uint32 cid; /* Same parameters passed to END_QUERY */
1039 SVGA3dQueryType type;
1040 SVGAGuestPtr guestResult;
1041}
1042#include "vmware_pack_end.h"
1043SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1044
1045typedef
1046#include "vmware_pack_begin.h"
1047struct {
1048 uint32 totalSize; /* Set by guest before query is ended. */
1049 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1050 union { /* Set by host on exit from PENDING state */
1051 uint32 result32;
1052 uint32 queryCookie; /* May be used to identify which QueryGetData this
1053 result corresponds to. */
1054 };
1055}
1056#include "vmware_pack_end.h"
1057SVGA3dQueryResult;
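The lifecycle implied above: the guest sets state to
SVGA3D_QUERYSTATE_PENDING before ending the query, and the host flips it on
completion. A hedged sketch of the guest-side completion check (allocation
and FIFO submission elided):

	/* 'result' is the SVGA3dQueryResult passed via guestResult. */
	static bool query_done(const volatile SVGA3dQueryResult *result)
	{
		/* Host moves state out of PENDING when the query finishes. */
		return result->state != SVGA3D_QUERYSTATE_PENDING;
	}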
1058
1059
1060/*
1061 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1062 *
1063 * This is a blit from an SVGA3D surface to a Screen Object.
1064 * This blit must be directed at a specific screen.
1065 *
1066 * The blit copies from a rectangular region of an SVGA3D surface
1067 * image to a rectangular region of a screen.
1068 *
1069 * This command takes an optional variable-length list of clipping
1070 * rectangles after the body of the command. If no rectangles are
1071 * specified, there is no clipping region. The entire destRect is
1072 * drawn to. If one or more rectangles are included, they describe
1073 * a clipping region. The clip rectangle coordinates are measured
1074 * relative to the top-left corner of destRect.
1075 *
1076 * The srcImage must be from mip=0 face=0.
1077 *
1078 * This supports scaling if the src and dest are of different sizes.
1079 *
1080 * Availability:
1081 * SVGA_FIFO_CAP_SCREEN_OBJECT
1082 */
1083
1084typedef
1085#include "vmware_pack_begin.h"
1086struct {
1087 SVGA3dSurfaceImageId srcImage;
1088 SVGASignedRect srcRect;
1089 uint32 destScreenId; /* Screen Object ID */
1090 SVGASignedRect destRect;
1091 /* Clipping: zero or more SVGASignedRects follow */
1092}
1093#include "vmware_pack_end.h"
1094SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
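Because the clip list follows the body with no explicit count, the number
of SVGASignedRects must be recovered from the command size. A hedged sketch
of that derivation:

	static uint32 blit_clip_rect_count(const SVGA3dCmdHeader *hdr)
	{
		return (hdr->size - sizeof(SVGA3dCmdBlitSurfaceToScreen)) /
		       sizeof(SVGASignedRect);
	}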
1095
1096typedef
1097#include "vmware_pack_begin.h"
1098struct {
1099 uint32 sid;
1100 SVGA3dTextureFilter filter;
1101}
1102#include "vmware_pack_end.h"
1103SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
1104
1105
1106
1107typedef
1108#include "vmware_pack_begin.h"
1109struct {
1110 uint32 sid;
1111}
1112#include "vmware_pack_end.h"
1113SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */
1114
1115typedef
1116#include "vmware_pack_begin.h"
1117struct {
1118 uint32 sid;
1119}
1120#include "vmware_pack_end.h"
1121SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
1122
1123/*
1124 * Screen DMA command
1125 *
1126 * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device
1127 * cap bit is not required.
1128 *
1129 * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer may
1130 *   be different, but the guest must ensure that refBuffer holds exactly
1131 *   the contents that were in place when the host received the previous
1132 *   screen DMA command.
1133 *
1134 * - changeMap is generated by lib/blit and contains at least the changes
1135 *   since the last screen DMA was received.
1136 */
1137
1138typedef
1139#include "vmware_pack_begin.h"
1140struct SVGA3dCmdScreenDMA {
1141 uint32 screenId;
1142 SVGAGuestImage refBuffer;
1143 SVGAGuestImage destBuffer;
1144 SVGAGuestImage changeMap;
1145}
1146#include "vmware_pack_end.h"
1147SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
1148
1149/*
1150 * Set Unity Surface Cookie
1151 *
1152 * Associates the supplied cookie with the surface id for use with
1153 * Unity. This cookie is a hint from guest to host; there is no way
1154 * for the guest to read back the cookie, and the host is free to drop
1155 * the cookie association at will. The default value for the cookie
1156 * on all surfaces is 0.
1157 */
1158
1159typedef
1160#include "vmware_pack_begin.h"
1161struct SVGA3dCmdSetUnitySurfaceCookie {
1162 uint32 sid;
1163 uint64 cookie;
1164}
1165#include "vmware_pack_end.h"
1166SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
1167
1168/*
1169 * Open a context-specific surface in a non-context-specific manner.
1170 */
1171
1172typedef
1173#include "vmware_pack_begin.h"
1174struct SVGA3dCmdOpenContextSurface {
1175 uint32 sid;
1176}
1177#include "vmware_pack_end.h"
1178SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
1179
1180
1181/*
1182 * Logic ops
1183 */
1184
1185#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01)
1186#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01)
1187#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02)
1188#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01)
1189
1190typedef
1191#include "vmware_pack_begin.h"
1192struct SVGA3dCmdLogicOpsBitBlt {
1193 /*
1194 * All LogicOps surfaces are one-level
1195 * surfaces so mipmap & face should always
1196 * be zero.
1197 */
1198 SVGA3dSurfaceImageId src;
1199 SVGA3dSurfaceImageId dst;
1200 SVGA3dLogicOp logicOp;
1201 /* Followed by variable number of SVGA3dCopyBox structures */
1202}
1203#include "vmware_pack_end.h"
1204SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */
1205
1206
1207typedef
1208#include "vmware_pack_begin.h"
1209struct SVGA3dCmdLogicOpsTransBlt {
1210 /*
1211 * All LogicOps surfaces are one-level
1212 * surfaces so mipmap & face should always
1213 * be zero.
1214 */
1215 SVGA3dSurfaceImageId src;
1216 SVGA3dSurfaceImageId dst;
1217 uint32 color;
1218 uint32 flags;
1219 SVGA3dBox srcBox;
1220 SVGA3dBox dstBox;
1221}
1222#include "vmware_pack_end.h"
1223SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
1224
1225
1226typedef
1227#include "vmware_pack_begin.h"
1228struct SVGA3dCmdLogicOpsStretchBlt {
1229 /*
1230 * All LogicOps surfaces are one-level
1231 * surfaces so mipmap & face should always
1232 * be zero.
1233 */
1234 SVGA3dSurfaceImageId src;
1235 SVGA3dSurfaceImageId dst;
1236 uint16 mode;
1237 uint16 flags;
1238 SVGA3dBox srcBox;
1239 SVGA3dBox dstBox;
1240}
1241#include "vmware_pack_end.h"
1242SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
1243
1244
1245typedef
1246#include "vmware_pack_begin.h"
1247struct SVGA3dCmdLogicOpsColorFill {
1248 /*
1249 * All LogicOps surfaces are one-level
1250 * surfaces so mipmap & face should always
1251 * be zero.
1252 */
1253 SVGA3dSurfaceImageId dst;
1254 uint32 color;
1255 SVGA3dLogicOp logicOp;
1256 /* Followed by variable number of SVGA3dRect structures. */
1257}
1258#include "vmware_pack_end.h"
1259SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
1260
1261
1262typedef
1263#include "vmware_pack_begin.h"
1264struct SVGA3dCmdLogicOpsAlphaBlend {
1265 /*
1266 * All LogicOps surfaces are one-level
1267 * surfaces so mipmap & face should always
1268 * be zero.
1269 */
1270 SVGA3dSurfaceImageId src;
1271 SVGA3dSurfaceImageId dst;
1272 uint32 alphaVal;
1273 uint32 flags;
1274 SVGA3dBox srcBox;
1275 SVGA3dBox dstBox;
1276}
1277#include "vmware_pack_end.h"
1278SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
1279
1280#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
1281
1282#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512
1283#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
1284
1285typedef
1286#include "vmware_pack_begin.h"
1287struct SVGA3dCmdLogicOpsClearTypeBlend {
1288 /*
1289 * All LogicOps surfaces are one-level
1290 * surfaces so mipmap & face should always
1291 * be zero.
1292 */
1293 SVGA3dSurfaceImageId tmp;
1294 SVGA3dSurfaceImageId dst;
1295 SVGA3dSurfaceImageId gammaSurf;
1296 SVGA3dSurfaceImageId alphaSurf;
1297 uint32 gamma;
1298 uint32 color;
1299 uint32 color2;
1300 int32 alphaOffsetX;
1301 int32 alphaOffsetY;
1302 /* Followed by variable number of SVGA3dBox structures */
1303}
1304#include "vmware_pack_end.h"
1305SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
1306
1307
1308/*
1309 * Guest-backed objects definitions.
1310 */
1311
1312typedef
1313#include "vmware_pack_begin.h"
1314struct {
1315 SVGAMobFormat ptDepth;
1316 uint32 sizeInBytes;
1317 PPN64 base;
1318}
1319#include "vmware_pack_end.h"
1320SVGAOTableMobEntry;
1321#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
1322
1323typedef
1324#include "vmware_pack_begin.h"
1325struct {
1326 SVGA3dSurfaceFormat format;
1327 SVGA3dSurfaceFlags surfaceFlags;
1328 uint32 numMipLevels;
1329 uint32 multisampleCount;
1330 SVGA3dTextureFilter autogenFilter;
1331 SVGA3dSize size;
1332 SVGAMobId mobid;
1333 uint32 arraySize;
1334 uint32 mobPitch;
1335 uint32 pad[5];
1336}
1337#include "vmware_pack_end.h"
1338SVGAOTableSurfaceEntry;
1339#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
1340
1341typedef
1342#include "vmware_pack_begin.h"
1343struct {
1344 uint32 cid;
1345 SVGAMobId mobid;
1346}
1347#include "vmware_pack_end.h"
1348SVGAOTableContextEntry;
1349#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
1350
1351typedef
1352#include "vmware_pack_begin.h"
1353struct {
1354 SVGA3dShaderType type;
1355 uint32 sizeInBytes;
1356 uint32 offsetInBytes;
1357 SVGAMobId mobid;
1358}
1359#include "vmware_pack_end.h"
1360SVGAOTableShaderEntry;
1361#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
1362
1363#define SVGA_STFLAG_PRIMARY (1 << 0)
1364typedef uint32 SVGAScreenTargetFlags;
1365
1366typedef
1367#include "vmware_pack_begin.h"
1368struct {
1369 SVGA3dSurfaceImageId image;
1370 uint32 width;
1371 uint32 height;
1372 int32 xRoot;
1373 int32 yRoot;
1374 SVGAScreenTargetFlags flags;
1375 uint32 dpi;
1376 uint32 pad[7];
1377}
1378#include "vmware_pack_end.h"
1379SVGAOTableScreenTargetEntry;
1380#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
1381 (sizeof(SVGAOTableScreenTargetEntry))
1382
1383typedef
1384#include "vmware_pack_begin.h"
1385struct {
1386 float value[4];
1387}
1388#include "vmware_pack_end.h"
1389SVGA3dShaderConstFloat;
1390
1391typedef
1392#include "vmware_pack_begin.h"
1393struct {
1394 int32 value[4];
1395}
1396#include "vmware_pack_end.h"
1397SVGA3dShaderConstInt;
1398
1399typedef
1400#include "vmware_pack_begin.h"
1401struct {
1402 uint32 value;
1403}
1404#include "vmware_pack_end.h"
1405SVGA3dShaderConstBool;
1406
1407typedef
1408#include "vmware_pack_begin.h"
1409struct {
1410 uint16 streamOffset;
1411 uint8 stream;
1412 uint8 type;
1413 uint8 methodUsage;
1414 uint8 usageIndex;
1415}
1416#include "vmware_pack_end.h"
1417SVGAGBVertexElement;
1418
1419typedef
1420#include "vmware_pack_begin.h"
1421struct {
1422 uint32 sid;
1423 uint16 stride;
1424 uint32 offset;
1425}
1426#include "vmware_pack_end.h"
1427SVGAGBVertexStream;
1428typedef
1429#include "vmware_pack_begin.h"
1430struct {
1431 SVGA3dRect viewport;
1432 SVGA3dRect scissorRect;
1433 SVGA3dZRange zRange;
1434
1435 SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
1436 SVGAGBVertexElement decl1[4];
1437
1438 uint32 renderStates[SVGA3D_RS_MAX];
1439 SVGAGBVertexElement decl2[18];
1440 uint32 pad0[2];
1441
1442 struct {
1443 SVGA3dFace face;
1444 SVGA3dMaterial material;
1445 } material;
1446
1447 float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
1448 float matrices[SVGA3D_TRANSFORM_MAX][16];
1449
1450 SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
1451 SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
1452
1453 /*
1454 * Shaders currently bound
1455 */
1456 uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
1457 SVGAGBVertexElement decl3[10];
1458 uint32 pad1[3];
1459
1460 uint32 occQueryActive;
1461 uint32 occQueryValue;
1462
1463 /*
1464 * Int/Bool Shader constants
1465 */
1466 SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
1467 SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
1468 uint16 pShaderBValues;
1469 uint16 vShaderBValues;
1470
1471
1472 SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
1473 SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
1474 uint32 numVertexDecls;
1475 uint32 numVertexStreams;
1476 uint32 numVertexDivisors;
1477 uint32 pad2[30];
1478
1479 /*
1480 * Texture Stages
1481 *
1482 * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
1483 * textureStages array.
1484 * SVGA3D_TS_COLOR_KEY is in tsColorKey.
1485 */
1486 uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
1487 uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
1488 uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
1489
1490 /*
1491 * Float Shader constants.
1492 */
1493 SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
1494 SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
1495}
1496#include "vmware_pack_end.h"
1497SVGAGBContextData;
1498#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
1499
1500/*
1501 * SVGA3dCmdSetOTableBase --
1502 *
1503 * This command allows the guest to specify the base PPN of the
1504 * specified object table.
1505 */
1506
1507typedef
1508#include "vmware_pack_begin.h"
1509struct {
1510 SVGAOTableType type;
1511 PPN baseAddress;
1512 uint32 sizeInBytes;
1513 uint32 validSizeInBytes;
1514 SVGAMobFormat ptDepth;
1515}
1516#include "vmware_pack_end.h"
1517SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
1518
1519typedef
1520#include "vmware_pack_begin.h"
1521struct {
1522 SVGAOTableType type;
1523 PPN64 baseAddress;
1524 uint32 sizeInBytes;
1525 uint32 validSizeInBytes;
1526 SVGAMobFormat ptDepth;
1527}
1528#include "vmware_pack_end.h"
1529SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
1530
1531typedef
1532#include "vmware_pack_begin.h"
1533struct {
1534 SVGAOTableType type;
1535}
1536#include "vmware_pack_end.h"
1537SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
1538
1539/*
1540 * Define a memory object (Mob) in the OTable.
1541 */
1542
1543typedef
1544#include "vmware_pack_begin.h"
1545struct SVGA3dCmdDefineGBMob {
1546 SVGAMobId mobid;
1547 SVGAMobFormat ptDepth;
1548 PPN base;
1549 uint32 sizeInBytes;
1550}
1551#include "vmware_pack_end.h"
1552SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
1553
1554
1555/*
1556 * Destroys an object in the OTable.
1557 */
1558
1559typedef
1560#include "vmware_pack_begin.h"
1561struct SVGA3dCmdDestroyGBMob {
1562 SVGAMobId mobid;
1563}
1564#include "vmware_pack_end.h"
1565SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
1566
1567
1568/*
1569 * Define a memory object (Mob) in the OTable with a PPN64 base.
1570 */
1571
1572typedef
1573#include "vmware_pack_begin.h"
1574struct SVGA3dCmdDefineGBMob64 {
1575 SVGAMobId mobid;
1576 SVGAMobFormat ptDepth;
1577 PPN64 base;
1578 uint32 sizeInBytes;
1579}
1580#include "vmware_pack_end.h"
1581SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
1582
1583/*
1584 * Redefine an object in the OTable with PPN64 base.
1585 */
1586
1587typedef
1588#include "vmware_pack_begin.h"
1589struct SVGA3dCmdRedefineGBMob64 {
1590 SVGAMobId mobid;
1591 SVGAMobFormat ptDepth;
1592 PPN64 base;
1593 uint32 sizeInBytes;
1594}
1595#include "vmware_pack_end.h"
1596SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
1597
1598/*
1599 * Notification that the page tables have been modified.
1600 */
1601
1602typedef
1603#include "vmware_pack_begin.h"
1604struct SVGA3dCmdUpdateGBMobMapping {
1605 SVGAMobId mobid;
1606}
1607#include "vmware_pack_end.h"
1608SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
1609
1610/*
1611 * Define a guest-backed surface.
1612 */
1613
1614typedef
1615#include "vmware_pack_begin.h"
1616struct SVGA3dCmdDefineGBSurface {
1617 uint32 sid;
1618 SVGA3dSurfaceFlags surfaceFlags;
1619 SVGA3dSurfaceFormat format;
1620 uint32 numMipLevels;
1621 uint32 multisampleCount;
1622 SVGA3dTextureFilter autogenFilter;
1623 SVGA3dSize size;
1624}
1625#include "vmware_pack_end.h"
1626SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
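/*
 * Illustrative only: defining a small 32-bit 2D texture. The enum
 * values come from the companion svga3d_types.h; sid is assumed to
 * come from the driver's surface id allocator.
 *
 *   SVGA3dCmdDefineGBSurface body;
 *
 *   body.sid = sid;
 *   body.surfaceFlags = SVGA3D_SURFACE_HINT_TEXTURE;
 *   body.format = SVGA3D_A8R8G8B8;
 *   body.numMipLevels = 1;
 *   body.multisampleCount = 0;
 *   body.autogenFilter = SVGA3D_TEX_FILTER_NONE;
 *   body.size.width = 256;
 *   body.size.height = 256;
 *   body.size.depth = 1;
 */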
1627
1628/*
1629 * Destroy a guest-backed surface.
1630 */
1631
1632typedef
1633#include "vmware_pack_begin.h"
1634struct SVGA3dCmdDestroyGBSurface {
1635 uint32 sid;
1636}
1637#include "vmware_pack_end.h"
1638SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
1639
1640/*
1641 * Bind a guest-backed surface to a mob.
1642 */
1643
1644typedef
1645#include "vmware_pack_begin.h"
1646struct SVGA3dCmdBindGBSurface {
1647 uint32 sid;
1648 SVGAMobId mobid;
1649}
1650#include "vmware_pack_end.h"
1651SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
1652
1653typedef
1654#include "vmware_pack_begin.h"
1655struct SVGA3dCmdBindGBSurfaceWithPitch {
1656 uint32 sid;
1657 SVGAMobId mobid;
1658 uint32 baseLevelPitch;
1659}
1660#include "vmware_pack_end.h"
1661SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
1662
1663/*
1664 * Conditionally bind a mob to a guest-backed surface if testMobid
1665 * matches the currently bound mob. Optionally issue a
1666 * readback/update on the surface while it is still bound to the old
1667 * mobid if the mobid is changed by this command.
1668 */
1669
1670#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
1671#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1)
1672
1673typedef
1674#include "vmware_pack_begin.h"
1675struct {
1676 uint32 sid;
1677 SVGAMobId testMobid;
1678 SVGAMobId mobid;
1679 uint32 flags;
1680}
1681#include "vmware_pack_end.h"
1682SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
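/*
 * Illustrative: when rebinding to a new mob while preserving contents,
 * a driver can request both operations against the previously bound
 * mob in a single conditional bind:
 *
 *   cmd.flags = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK |
 *               SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE;
 */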
1683
1684/*
1685 * Update an image in a guest-backed surface.
1686 * (Inform the device that the guest contents have been updated.)
1687 */
1688
1689typedef
1690#include "vmware_pack_begin.h"
1691struct SVGA3dCmdUpdateGBImage {
1692 SVGA3dSurfaceImageId image;
1693 SVGA3dBox box;
1694}
1695#include "vmware_pack_end.h"
1696SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
1697
1698/*
1699 * Update an entire guest-backed surface.
1700 * (Inform the device that the guest contents have been updated.)
1701 */
1702
1703typedef
1704#include "vmware_pack_begin.h"
1705struct SVGA3dCmdUpdateGBSurface {
1706 uint32 sid;
1707}
1708#include "vmware_pack_end.h"
1709SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
1710
1711/*
1712 * Readback an image in a guest-backed surface.
1713 * (Request the device to flush the dirty contents into the guest.)
1714 */
1715
1716typedef
1717#include "vmware_pack_begin.h"
1718struct SVGA3dCmdReadbackGBImage {
1719 SVGA3dSurfaceImageId image;
1720}
1721#include "vmware_pack_end.h"
1722SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
1723
1724/*
1725 * Readback an entire guest-backed surface.
1726 * (Request the device to flush the dirty contents into the guest.)
1727 */
1728
1729typedef
1730#include "vmware_pack_begin.h"
1731struct SVGA3dCmdReadbackGBSurface {
1732 uint32 sid;
1733}
1734#include "vmware_pack_end.h"
1735SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
1736
1737/*
1738 * Readback a sub rect of an image in a guest-backed surface. After
1739 * issuing this command the driver is required to issue an update call
1740 * of the same region before issuing any other commands that reference
1741 * this surface; otherwise rendering is not guaranteed.
1742 */
1743
1744typedef
1745#include "vmware_pack_begin.h"
1746struct SVGA3dCmdReadbackGBImagePartial {
1747 SVGA3dSurfaceImageId image;
1748 SVGA3dBox box;
1749 uint32 invertBox;
1750}
1751#include "vmware_pack_end.h"
1752SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
1753
1754
1755/*
1756 * Invalidate an image in a guest-backed surface.
1757 * (Notify the device that the contents can be lost.)
1758 */
1759
1760typedef
1761#include "vmware_pack_begin.h"
1762struct SVGA3dCmdInvalidateGBImage {
1763 SVGA3dSurfaceImageId image;
1764}
1765#include "vmware_pack_end.h"
1766SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
1767
1768/*
1769 * Invalidate an entire guest-backed surface.
1770 * (Notify the device that the contents of all images can be lost.)
1771 */
1772
1773typedef
1774#include "vmware_pack_begin.h"
1775struct SVGA3dCmdInvalidateGBSurface {
1776 uint32 sid;
1777}
1778#include "vmware_pack_end.h"
1779SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
1780
1781/*
1782 * Invalidate a sub rect of an image in a guest-backed surface. After
1783 * issuing this command the driver is required to issue an update call
1784 * of the same region before issuing any other commands that reference
1785 * this surface; otherwise rendering is not guaranteed.
1786 */
1787
1788typedef
1789#include "vmware_pack_begin.h"
1790struct SVGA3dCmdInvalidateGBImagePartial {
1791 SVGA3dSurfaceImageId image;
1792 SVGA3dBox box;
1793 uint32 invertBox;
1794}
1795#include "vmware_pack_end.h"
1796SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
1797
1798
1799/*
1800 * Define a guest-backed context.
1801 */
1802
1803typedef
1804#include "vmware_pack_begin.h"
1805struct SVGA3dCmdDefineGBContext {
1806 uint32 cid;
1807}
1808#include "vmware_pack_end.h"
1809SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
1810
1811/*
1812 * Destroy a guest-backed context.
1813 */
1814
1815typedef
1816#include "vmware_pack_begin.h"
1817struct SVGA3dCmdDestroyGBContext {
1818 uint32 cid;
1819}
1820#include "vmware_pack_end.h"
1821SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
1822
1823/*
1824 * Bind a guest-backed context.
1825 *
1826 * validContents should be set to 0 for new contexts,
1827 * and 1 if this is an old context which is getting paged
1828 * back onto the device.
1829 *
1830 * For new contexts, it is recommended that the driver
1831 * issue commands to initialize all interesting state
1832 * prior to rendering.
1833 */
1834
1835typedef
1836#include "vmware_pack_begin.h"
1837struct SVGA3dCmdBindGBContext {
1838 uint32 cid;
1839 SVGAMobId mobid;
1840 uint32 validContents;
1841}
1842#include "vmware_pack_end.h"
1843SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
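/*
 * Illustrative: first-time bind of a freshly defined context. The mob
 * holds no saved state yet, so validContents must be 0 to keep the
 * device from paging stale data in:
 *
 *   cmd.cid = cid;
 *   cmd.mobid = mobid;
 *   cmd.validContents = 0;
 */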
1844
1845/*
1846 * Readback a guest-backed context.
1847 * (Request that the device flush the contents back into guest memory.)
1848 */
1849
1850typedef
1851#include "vmware_pack_begin.h"
1852struct SVGA3dCmdReadbackGBContext {
1853 uint32 cid;
1854}
1855#include "vmware_pack_end.h"
1856SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
1857
1858/*
1859 * Invalidate a guest-backed context.
1860 */
1861typedef
1862#include "vmware_pack_begin.h"
1863struct SVGA3dCmdInvalidateGBContext {
1864 uint32 cid;
1865}
1866#include "vmware_pack_end.h"
1867SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
1868
1869/*
1870 * Define a guest-backed shader.
1871 */
1872
1873typedef
1874#include "vmware_pack_begin.h"
1875struct SVGA3dCmdDefineGBShader {
1876 uint32 shid;
1877 SVGA3dShaderType type;
1878 uint32 sizeInBytes;
1879}
1880#include "vmware_pack_end.h"
1881SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
1882
1883/*
1884 * Bind a guest-backed shader.
1885 */
1886
1887typedef
1888#include "vmware_pack_begin.h"
1889struct SVGA3dCmdBindGBShader {
1890 uint32 shid;
1891 SVGAMobId mobid;
1892 uint32 offsetInBytes;
1893}
1894#include "vmware_pack_end.h"
1895SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
1896
1897/*
1898 * Destroy a guest-backed shader.
1899 */
1900
1901typedef
1902#include "vmware_pack_begin.h"
1903struct SVGA3dCmdDestroyGBShader {
1904 uint32 shid;
1905}
1906#include "vmware_pack_end.h"
1907SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
1908
1909typedef
1910#include "vmware_pack_begin.h"
1911struct {
1912 uint32 cid;
1913 uint32 regStart;
1914 SVGA3dShaderType shaderType;
1915 SVGA3dShaderConstType constType;
1916
1917 /*
1918 * Followed by a variable number of shader constants.
1919 *
1920 * Note that FLOAT and INT constants are 4 dwords in length, while
1921 * BOOL constants are 1 dword in length.
1922 */
1923}
1924#include "vmware_pack_end.h"
1925SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
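/*
 * Illustrative sizing of this variable-length command (a sketch, not
 * driver code): the header's size field covers the fixed body plus the
 * inline payload, whose per-constant size depends on constType:
 *
 *   uint32 constSize = (constType == SVGA3D_CONST_TYPE_BOOL) ?
 *                      sizeof(uint32) : 4 * sizeof(uint32);
 *   header.size = sizeof(SVGA3dCmdSetGBShaderConstInline) +
 *                 numConsts * constSize;
 */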
1926
1927
1928typedef
1929#include "vmware_pack_begin.h"
1930struct {
1931 uint32 cid;
1932 SVGA3dQueryType type;
1933}
1934#include "vmware_pack_end.h"
1935SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
1936
1937typedef
1938#include "vmware_pack_begin.h"
1939struct {
1940 uint32 cid;
1941 SVGA3dQueryType type;
1942 SVGAMobId mobid;
1943 uint32 offset;
1944}
1945#include "vmware_pack_end.h"
1946SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
1947
1948
1949/*
1950 * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
1951 *
1952 * The semantics of this command are identical to the
1953 * SVGA_3D_CMD_WAIT_FOR_QUERY, except that the results are written
1954 * to a Mob instead of a GMR.
1955 */
1956
1957typedef
1958#include "vmware_pack_begin.h"
1959struct {
1960 uint32 cid;
1961 SVGA3dQueryType type;
1962 SVGAMobId mobid;
1963 uint32 offset;
1964}
1965#include "vmware_pack_end.h"
1966SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
1967
1968
1969typedef
1970#include "vmware_pack_begin.h"
1971struct {
1972 SVGAMobId mobid;
1973 uint32 mustBeZero;
1974 uint32 initialized;
1975}
1976#include "vmware_pack_end.h"
1977SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
1978
1979typedef
1980#include "vmware_pack_begin.h"
1981struct {
1982 SVGAMobId mobid;
1983 uint32 gartOffset;
1984}
1985#include "vmware_pack_end.h"
1986SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
1987
1988
1989typedef
1990#include "vmware_pack_begin.h"
1991struct {
1992 uint32 gartOffset;
1993 uint32 numPages;
1994}
1995#include "vmware_pack_end.h"
1996SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
1997
1998
1999/*
2000 * Screen Targets
2001 */
2002
2003typedef
2004#include "vmware_pack_begin.h"
2005struct {
2006 uint32 stid;
2007 uint32 width;
2008 uint32 height;
2009 int32 xRoot;
2010 int32 yRoot;
2011 SVGAScreenTargetFlags flags;
2012
2013 /*
2014 * The physical DPI at which the guest expects this screen to be displayed.
2015 *
2016 * Guests which are not DPI-aware should set this to zero.
2017 */
2018 uint32 dpi;
2019}
2020#include "vmware_pack_end.h"
2021SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
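/*
 * Illustrative: defining a 1024x768 primary screen target at the
 * origin for a guest that is not DPI-aware (SVGA_STFLAG_PRIMARY is
 * defined in svga_reg.h):
 *
 *   cmd.stid = 0;
 *   cmd.width = 1024;
 *   cmd.height = 768;
 *   cmd.xRoot = 0;
 *   cmd.yRoot = 0;
 *   cmd.flags = SVGA_STFLAG_PRIMARY;
 *   cmd.dpi = 0;
 */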
2022
2023typedef
2024#include "vmware_pack_begin.h"
2025struct {
2026 uint32 stid;
2027}
2028#include "vmware_pack_end.h"
2029SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
2030
2031typedef
2032#include "vmware_pack_begin.h"
2033struct {
2034 uint32 stid;
2035 SVGA3dSurfaceImageId image;
2036}
2037#include "vmware_pack_end.h"
2038SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
2039
2040typedef
2041#include "vmware_pack_begin.h"
2042struct {
2043 uint32 stid;
2044 SVGA3dRect rect;
2045}
2046#include "vmware_pack_end.h"
2047SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
2048
2049typedef
2050#include "vmware_pack_begin.h"
2051struct SVGA3dCmdGBScreenDMA {
2052 uint32 screenId;
2053 uint32 dead;
2054 SVGAMobId destMobID;
2055 uint32 destPitch;
2056 SVGAMobId changeMapMobID;
2057}
2058#include "vmware_pack_end.h"
2059SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */
2060
2061typedef
2062#include "vmware_pack_begin.h"
2063struct {
2064 uint32 value;
2065 uint32 mobId;
2066 uint32 mobOffset;
2067}
2068#include "vmware_pack_end.h"
2069SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
2070
2071#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 000000000000..c18b663f360f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_devcaps.h --
28 *
29 * SVGA 3d caps definitions
30 */
31
32#ifndef _SVGA3D_DEVCAPS_H_
33#define _SVGA3D_DEVCAPS_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41/*
42 * 3D Hardware Version
43 *
44 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION FIFO
45 * register. It is set by the host and read by the guest. This lets
46 * us make new guest drivers which are backwards-compatible with old
47 * SVGA hardware revisions. It does not let us support old guest
48 * drivers. Good enough for now.
49 *
50 */
51
52#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
53#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
54#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
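/*
 * Illustrative helper, not part of the original header: compare a
 * version read from SVGA_FIFO_3D_HWVERSION against a required minimum.
 * This works because versions compare monotonically in (major, minor).
 */
static inline Bool
SVGA3dHWVersionAtLeast(uint32 version, uint32 major, uint32 minor)
{
   return version >= SVGA3D_MAKE_HWVERSION(major, minor);
}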
55
56typedef enum {
57 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
58 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
59 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
60 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
61 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
62 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
63 SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
64 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
65} SVGA3dHardwareVersion;
66
67/*
68 * DevCap indexes.
69 */
70
71typedef enum {
72 SVGA3D_DEVCAP_INVALID = ((uint32)-1),
73 SVGA3D_DEVCAP_3D = 0,
74 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
75
76 /*
77 * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
78 * fixed-function texture units available. Each of these units
79 * works in both FFP and Shader modes, and they support texture
80 * transforms and texture coordinates. The host may have additional
81 * texture image units that are only usable with shaders.
82 */
83 SVGA3D_DEVCAP_MAX_TEXTURES = 2,
84 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
85 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
86 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
87 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
88 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
89 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
90 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
91 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
92 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
93 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
94 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
95 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
96 SVGA3D_DEVCAP_QUERY_TYPES = 15,
97 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
98 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
99 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
100 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
101 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
102 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
103 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
104 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
105 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
106 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
107 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
108 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
109 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
110 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
111 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
112 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
113 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
114 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
115 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
116 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
117 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
118 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
119 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
120 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
121 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
122 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
123 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
124 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
125 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
126 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
127 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
128 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
129 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
130 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
131 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
132 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
133 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
134 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
135 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
136 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
137 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
138 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
139 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
140 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
141 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
142 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
143
144 /*
145 * There is a hole in our devcap definitions for
146 * historical reasons.
147 *
148 * Define a constant just for completeness.
149 */
150 SVGA3D_DEVCAP_MISSING62 = 62,
151
152 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
153
154 /*
155 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
156 * render targets. This does not include the depth or stencil targets.
157 */
158 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
159
160 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
161 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
162 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
163 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
164 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
165 SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
166 SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
167 SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
168 SVGA3D_DEVCAP_SUPERSAMPLE = 73,
169 SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
170 SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
171 SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
172
173 /*
174 * This is the maximum number of SVGA context IDs that the guest
175 * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
176 */
177 SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
178
179 /*
180 * This is the maximum number of SVGA surface IDs that the guest
181 * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
182 */
183 SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
184
185 SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
186 SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
187 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
188
189 SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
190 SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
191
192 /*
193 * Deprecated.
194 */
195 SVGA3D_DEVCAP_DEAD1 = 84,
196
197 /*
198 * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
199 * ORed together, one for every type of video decoding supported.
200 */
201 SVGA3D_DEVCAP_VIDEO_DECODE = 85,
202
203 /*
204 * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
205 * ORed together, one for every type of video processing supported.
206 */
207 SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
208
209 SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
210 SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
211 SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
212 SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
213
214 SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
215
216 /*
217 * Does the host support the SVGA logic ops commands?
218 */
219 SVGA3D_DEVCAP_LOGICOPS = 92,
220
221 /*
222 * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
223 */
224 SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
225
226 /*
227 * Deprecated.
228 */
229 SVGA3D_DEVCAP_DEAD2 = 94,
230
231 /*
232 * Does the device support the DX commands?
233 */
234 SVGA3D_DEVCAP_DX = 95,
235
236 /*
237 * What is the maximum size of a texture array?
238 *
239 * (Even if this cap is zero, cubemaps are still allowed.)
240 */
241 SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
242
243 /*
244 * What is the maximum number of vertex buffers that can
245 * be used in the DXContext inputAssembly?
246 */
247 SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
248
249 /*
250 * What is the maximum number of constant buffers
251 * that can be expected to work correctly with a
252 * DX context?
253 */
254 SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
255
256 /*
257 * Does the device support provoking vertex control?
258 * If zero, the first vertex will always be the provoking vertex.
259 */
260 SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
261
262 SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
263 SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
264 SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
265 SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
266 SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
267 SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
268 SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
269 SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
270 SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
271 SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
272 SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
273 SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
274 SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
275 SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
276 SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
277 SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
278 SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
279 SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
280 SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
281 SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
282 SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
283 SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
284 SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
285 SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
286 SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
287 SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
288 SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
289 SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
290 SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
291 SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
292 SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
293 SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
294 SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
295 SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
296 SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
297 SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
298 SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
299 SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
300 SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
301 SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
302 SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
303 SVGA3D_DEVCAP_DXFMT_UYVY = 141,
304 SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
305 SVGA3D_DEVCAP_DXFMT_NV12 = 143,
306 SVGA3D_DEVCAP_DXFMT_AYUV = 144,
307 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
308 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
309 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
310 SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
311 SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
312 SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
313 SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
314 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
315 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
316 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
317 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
318 SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
319 SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
320 SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
321 SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
322 SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
323 SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
324 SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
325 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
326 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
327 SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
328 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
329 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
330 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
331 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
332 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
333 SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
334 SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
335 SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
336 SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
337 SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
338 SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
339 SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
340 SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
341 SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
342 SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
343 SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
344 SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
345 SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
346 SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
347 SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
348 SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
349 SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
350 SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
351 SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
352 SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
353 SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
354 SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
355 SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
356 SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
357 SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
358 SVGA3D_DEVCAP_DXFMT_P8 = 196,
359 SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
360 SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
361 SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
362 SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
363 SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
364 SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
365 SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
366 SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
367 SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
368 SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
369 SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
370 SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
371 SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
372 SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
373 SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
374 SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
375 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
376 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
377 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
378 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
379 SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
380 SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
381 SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
382 SVGA3D_DEVCAP_DXFMT_YV12 = 220,
383 SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
384 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
385 SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
386 SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
387 SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
388 SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
389 SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
390 SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
391 SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
392 SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
393 SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
394 SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
395 SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
396 SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
397 SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
398 SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
399 SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
400 SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
401 SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
402 SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
403 SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
404 SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
405 SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
406
407 SVGA3D_DEVCAP_MAX /* This must be the last index. */
408} SVGA3dDevCapIndex;
409
410/*
411 * Bit definitions for DXFMT devcaps
412 *
413 *
414 * SUPPORTED: Can the format be defined?
415 * SHADER_SAMPLE: Can the format be sampled from a shader?
416 * COLOR_RENDERTARGET: Can the format be a color render target?
417 * DEPTH_RENDERTARGET: Can the format be a depth render target?
418 * BLENDABLE: Is the format blendable?
419 * MIPS: Does the format support mip levels?
420 * ARRAY: Does the format support texture arrays?
421 * VOLUME: Does the format support volume (3D) textures?
422 * MULTISAMPLE_2: Does the format support 2x multisample?
423 * MULTISAMPLE_4: Does the format support 4x multisample?
424 * MULTISAMPLE_8: Does the format support 8x multisample?
425 */
426#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
427#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
428#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2)
429#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3)
430#define SVGA3D_DXFMT_BLENDABLE (1 << 4)
431#define SVGA3D_DXFMT_MIPS (1 << 5)
432#define SVGA3D_DXFMT_ARRAY (1 << 6)
433#define SVGA3D_DXFMT_VOLUME (1 << 7)
434#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
435#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
436#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
437#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
438#define SVGADX_DXFMT_MAX (1 << 12)
439
440/*
441 * Convenience mask for any multisample capability.
442 *
443 * The multisample bits imply both load and render capability.
444 */
445#define SVGA3D_DXFMT_MULTISAMPLE ( \
446 SVGADX_DXFMT_MULTISAMPLE_2 | \
447 SVGADX_DXFMT_MULTISAMPLE_4 | \
448 SVGADX_DXFMT_MULTISAMPLE_8 )
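/*
 * Illustrative helper, not part of the original header: a format is
 * only usable as a color render target when it is both defineable and
 * flagged renderable.
 */
static inline Bool
SVGA3dDXFmtIsColorRenderable(uint32 caps)
{
   const uint32 needed = SVGA3D_DXFMT_SUPPORTED |
                         SVGA3D_DXFMT_COLOR_RENDERTARGET;

   return (caps & needed) == needed;
}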
449
450typedef union {
451 Bool b;
452 uint32 u;
453 int32 i;
454 float f;
455} SVGA3dDevCapResult;
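/*
 * Illustrative sketch: on hardware with guest-backed objects a devcap
 * is queried by writing the index to SVGA_REG_DEV_CAP and reading the
 * result back from the same register; the union above then gives the
 * raw value its documented type. Assuming vmwgfx's register accessors
 * vmw_write()/vmw_read(), and recalling that MAX_LINE_WIDTH is
 * documented as a float:
 *
 *   SVGA3dDevCapResult result;
 *   float max_line_width;
 *
 *   vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_MAX_LINE_WIDTH);
 *   result.u = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 *   max_line_width = result.f;
 */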
456
457#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 000000000000..8c5ae608cfb4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
1/**********************************************************
2 * Copyright 2012-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_dx.h --
28 *
29 * SVGA 3d hardware definitions for DX10 support.
30 */
31
32#ifndef _SVGA3D_DX_H_
33#define _SVGA3D_DX_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38#include "includeCheck.h"
39
40#include "svga3d_limits.h"
41
42#define SVGA3D_INPUT_MIN 0
43#define SVGA3D_INPUT_PER_VERTEX_DATA 0
44#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
45#define SVGA3D_INPUT_MAX 2
46typedef uint32 SVGA3dInputClassification;
47
48#define SVGA3D_RESOURCE_TYPE_MIN 1
49#define SVGA3D_RESOURCE_BUFFER 1
50#define SVGA3D_RESOURCE_TEXTURE1D 2
51#define SVGA3D_RESOURCE_TEXTURE2D 3
52#define SVGA3D_RESOURCE_TEXTURE3D 4
53#define SVGA3D_RESOURCE_TEXTURECUBE 5
54#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
55#define SVGA3D_RESOURCE_BUFFEREX 6
56#define SVGA3D_RESOURCE_TYPE_MAX 7
57typedef uint32 SVGA3dResourceType;
58
59#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
60#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
61typedef uint8 SVGA3dDepthWriteMask;
62
63#define SVGA3D_FILTER_MIP_LINEAR (1 << 0)
64#define SVGA3D_FILTER_MAG_LINEAR (1 << 2)
65#define SVGA3D_FILTER_MIN_LINEAR (1 << 4)
66#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
67#define SVGA3D_FILTER_COMPARE (1 << 7)
68typedef uint32 SVGA3dFilter;
69
70#define SVGA3D_CULL_INVALID 0
71#define SVGA3D_CULL_MIN 1
72#define SVGA3D_CULL_NONE 1
73#define SVGA3D_CULL_FRONT 2
74#define SVGA3D_CULL_BACK 3
75#define SVGA3D_CULL_MAX 4
76typedef uint8 SVGA3dCullMode;
77
78#define SVGA3D_COMPARISON_INVALID 0
79#define SVGA3D_COMPARISON_MIN 1
80#define SVGA3D_COMPARISON_NEVER 1
81#define SVGA3D_COMPARISON_LESS 2
82#define SVGA3D_COMPARISON_EQUAL 3
83#define SVGA3D_COMPARISON_LESS_EQUAL 4
84#define SVGA3D_COMPARISON_GREATER 5
85#define SVGA3D_COMPARISON_NOT_EQUAL 6
86#define SVGA3D_COMPARISON_GREATER_EQUAL 7
87#define SVGA3D_COMPARISON_ALWAYS 8
88#define SVGA3D_COMPARISON_MAX 9
89typedef uint8 SVGA3dComparisonFunc;
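/*
 * Illustrative helper, not part of the original header, showing the
 * MIN/MAX convention these enums follow: a value is valid when it lies
 * in [MIN, MAX).
 */
static inline Bool
SVGA3dComparisonFuncIsValid(SVGA3dComparisonFunc func)
{
   return func >= SVGA3D_COMPARISON_MIN && func < SVGA3D_COMPARISON_MAX;
}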
90
91#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
92#define SVGA3D_DX_MAX_SOTARGETS 4
93#define SVGA3D_DX_MAX_SRVIEWS 128
94#define SVGA3D_DX_MAX_CONSTBUFFERS 16
95#define SVGA3D_DX_MAX_SAMPLERS 16
96
97/* Id limits */
98static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
99static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
100
101typedef uint32 SVGA3dSurfaceId;
102typedef uint32 SVGA3dShaderResourceViewId;
103typedef uint32 SVGA3dRenderTargetViewId;
104typedef uint32 SVGA3dDepthStencilViewId;
105
106typedef uint32 SVGA3dShaderId;
107typedef uint32 SVGA3dElementLayoutId;
108typedef uint32 SVGA3dSamplerId;
109typedef uint32 SVGA3dBlendStateId;
110typedef uint32 SVGA3dDepthStencilStateId;
111typedef uint32 SVGA3dRasterizerStateId;
112typedef uint32 SVGA3dQueryId;
113typedef uint32 SVGA3dStreamOutputId;
114
115typedef union {
116 struct {
117 float r;
118 float g;
119 float b;
120 float a;
121 };
122
123 float value[4];
124} SVGA3dRGBAFloat;
125
126typedef
127#include "vmware_pack_begin.h"
128struct {
129 uint32 cid;
130 SVGAMobId mobid;
131}
132#include "vmware_pack_end.h"
133SVGAOTableDXContextEntry;
134
135typedef
136#include "vmware_pack_begin.h"
137struct SVGA3dCmdDXDefineContext {
138 uint32 cid;
139}
140#include "vmware_pack_end.h"
141SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
142
143typedef
144#include "vmware_pack_begin.h"
145struct SVGA3dCmdDXDestroyContext {
146 uint32 cid;
147}
148#include "vmware_pack_end.h"
149SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
150
151/*
152 * Bind a DX context.
153 *
154 * validContents should be set to 0 for new contexts,
155 * and 1 if this is an old context which is getting paged
156 * back onto the device.
157 *
158 * For new contexts, it is recommended that the driver
159 * issue commands to initialize all interesting state
160 * prior to rendering.
161 */
162typedef
163#include "vmware_pack_begin.h"
164struct SVGA3dCmdDXBindContext {
165 uint32 cid;
166 SVGAMobId mobid;
167 uint32 validContents;
168}
169#include "vmware_pack_end.h"
170SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */
171
172/*
173 * Readback a DX context.
174 * (Request that the device flush the contents back into guest memory.)
175 */
176typedef
177#include "vmware_pack_begin.h"
178struct SVGA3dCmdDXReadbackContext {
179 uint32 cid;
180}
181#include "vmware_pack_end.h"
182SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
183
184/*
185 * Invalidate a DX context.
186 */
187typedef
188#include "vmware_pack_begin.h"
189struct SVGA3dCmdDXInvalidateContext {
190 uint32 cid;
191}
192#include "vmware_pack_end.h"
193SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
194
195typedef
196#include "vmware_pack_begin.h"
197struct SVGA3dReplyFormatData {
198 uint32 formatSupport;
199 uint32 msaa2xQualityLevels:5;
200 uint32 msaa4xQualityLevels:5;
201 uint32 msaa8xQualityLevels:5;
202 uint32 msaa16xQualityLevels:5;
203 uint32 msaa32xQualityLevels:5;
204 uint32 pad:7;
205}
206#include "vmware_pack_end.h"
207SVGA3dReplyFormatData;
208
209typedef
210#include "vmware_pack_begin.h"
211struct SVGA3dCmdDXSetSingleConstantBuffer {
212 uint32 slot;
213 SVGA3dShaderType type;
214 SVGA3dSurfaceId sid;
215 uint32 offsetInBytes;
216 uint32 sizeInBytes;
217}
218#include "vmware_pack_end.h"
219SVGA3dCmdDXSetSingleConstantBuffer;
220/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
221
222typedef
223#include "vmware_pack_begin.h"
224struct SVGA3dCmdDXSetShaderResources {
225 uint32 startView;
226 SVGA3dShaderType type;
227
228 /*
229 * Followed by a variable number of SVGA3dShaderResourceViewId's.
230 */
231}
232#include "vmware_pack_end.h"
233SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
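/*
 * Illustrative: the number of trailing view ids is not stored in the
 * body; a sketch of recovering it from the usual header+body framing:
 *
 *   numViews = (header.size - sizeof(SVGA3dCmdDXSetShaderResources)) /
 *              sizeof(SVGA3dShaderResourceViewId);
 */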
234
235typedef
236#include "vmware_pack_begin.h"
237struct SVGA3dCmdDXSetShader {
238 SVGA3dShaderId shaderId;
239 SVGA3dShaderType type;
240}
241#include "vmware_pack_end.h"
242SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
243
244typedef
245#include "vmware_pack_begin.h"
246struct SVGA3dCmdDXSetSamplers {
247 uint32 startSampler;
248 SVGA3dShaderType type;
249
250 /*
251 * Followed by a variable number of SVGA3dSamplerId's.
252 */
253}
254#include "vmware_pack_end.h"
255SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
256
257typedef
258#include "vmware_pack_begin.h"
259struct SVGA3dCmdDXDraw {
260 uint32 vertexCount;
261 uint32 startVertexLocation;
262}
263#include "vmware_pack_end.h"
264SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
265
266typedef
267#include "vmware_pack_begin.h"
268struct SVGA3dCmdDXDrawIndexed {
269 uint32 indexCount;
270 uint32 startIndexLocation;
271 int32 baseVertexLocation;
272}
273#include "vmware_pack_end.h"
274SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
275
276typedef
277#include "vmware_pack_begin.h"
278struct SVGA3dCmdDXDrawInstanced {
279 uint32 vertexCountPerInstance;
280 uint32 instanceCount;
281 uint32 startVertexLocation;
282 uint32 startInstanceLocation;
283}
284#include "vmware_pack_end.h"
285SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
286
287typedef
288#include "vmware_pack_begin.h"
289struct SVGA3dCmdDXDrawIndexedInstanced {
290 uint32 indexCountPerInstance;
291 uint32 instanceCount;
292 uint32 startIndexLocation;
293 int32 baseVertexLocation;
294 uint32 startInstanceLocation;
295}
296#include "vmware_pack_end.h"
297SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
298
299typedef
300#include "vmware_pack_begin.h"
301struct SVGA3dCmdDXDrawAuto {
302 uint32 pad0;
303}
304#include "vmware_pack_end.h"
305SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
306
307typedef
308#include "vmware_pack_begin.h"
309struct SVGA3dCmdDXSetInputLayout {
310 SVGA3dElementLayoutId elementLayoutId;
311}
312#include "vmware_pack_end.h"
313SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
314
315typedef
316#include "vmware_pack_begin.h"
317struct SVGA3dVertexBuffer {
318 SVGA3dSurfaceId sid;
319 uint32 stride;
320 uint32 offset;
321}
322#include "vmware_pack_end.h"
323SVGA3dVertexBuffer;
324
325typedef
326#include "vmware_pack_begin.h"
327struct SVGA3dCmdDXSetVertexBuffers {
328 uint32 startBuffer;
329 /* Followed by a variable number of SVGA3dVertexBuffer's. */
330}
331#include "vmware_pack_end.h"
332SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
333
334typedef
335#include "vmware_pack_begin.h"
336struct SVGA3dCmdDXSetIndexBuffer {
337 SVGA3dSurfaceId sid;
338 SVGA3dSurfaceFormat format;
339 uint32 offset;
340}
341#include "vmware_pack_end.h"
342SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
343
344typedef
345#include "vmware_pack_begin.h"
346struct SVGA3dCmdDXSetTopology {
347 SVGA3dPrimitiveType topology;
348}
349#include "vmware_pack_end.h"
350SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
351
352typedef
353#include "vmware_pack_begin.h"
354struct SVGA3dCmdDXSetRenderTargets {
355 SVGA3dDepthStencilViewId depthStencilViewId;
356 /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
357}
358#include "vmware_pack_end.h"
359SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
360
361typedef
362#include "vmware_pack_begin.h"
363struct SVGA3dCmdDXSetBlendState {
364 SVGA3dBlendStateId blendId;
365 float blendFactor[4];
366 uint32 sampleMask;
367}
368#include "vmware_pack_end.h"
369SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
370
371typedef
372#include "vmware_pack_begin.h"
373struct SVGA3dCmdDXSetDepthStencilState {
374 SVGA3dDepthStencilStateId depthStencilId;
375 uint32 stencilRef;
376}
377#include "vmware_pack_end.h"
378SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
379
380typedef
381#include "vmware_pack_begin.h"
382struct SVGA3dCmdDXSetRasterizerState {
383 SVGA3dRasterizerStateId rasterizerId;
384}
385#include "vmware_pack_end.h"
386SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
387
388#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
389typedef uint32 SVGA3dDXQueryFlags;
390
391/*
392 * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
393 * to track query state transitions, but are not intended to be used by the
394 * driver.
395 */
396#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */
397#define SVGADX_QDSTATE_MIN 0
398#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */
399#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */
400#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */
401#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */
402#define SVGADX_QDSTATE_MAX 4
403typedef uint8 SVGADXQueryDeviceState;
404
405typedef
406#include "vmware_pack_begin.h"
407struct {
408 SVGA3dQueryTypeUint8 type;
409 uint16 pad0;
410 SVGADXQueryDeviceState state;
411 SVGA3dDXQueryFlags flags;
412 SVGAMobId mobid;
413 uint32 offset;
414}
415#include "vmware_pack_end.h"
416SVGACOTableDXQueryEntry;
417
418typedef
419#include "vmware_pack_begin.h"
420struct SVGA3dCmdDXDefineQuery {
421 SVGA3dQueryId queryId;
422 SVGA3dQueryType type;
423 SVGA3dDXQueryFlags flags;
424}
425#include "vmware_pack_end.h"
426SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
427
428typedef
429#include "vmware_pack_begin.h"
430struct SVGA3dCmdDXDestroyQuery {
431 SVGA3dQueryId queryId;
432}
433#include "vmware_pack_end.h"
434SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
435
436typedef
437#include "vmware_pack_begin.h"
438struct SVGA3dCmdDXBindQuery {
439 SVGA3dQueryId queryId;
440 SVGAMobId mobid;
441}
442#include "vmware_pack_end.h"
443SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
444
445typedef
446#include "vmware_pack_begin.h"
447struct SVGA3dCmdDXSetQueryOffset {
448 SVGA3dQueryId queryId;
449 uint32 mobOffset;
450}
451#include "vmware_pack_end.h"
452SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
453
454typedef
455#include "vmware_pack_begin.h"
456struct SVGA3dCmdDXBeginQuery {
457 SVGA3dQueryId queryId;
458}
459#include "vmware_pack_end.h"
460SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
461
462typedef
463#include "vmware_pack_begin.h"
464struct SVGA3dCmdDXEndQuery {
465 SVGA3dQueryId queryId;
466}
467#include "vmware_pack_end.h"
468SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
469
470typedef
471#include "vmware_pack_begin.h"
472struct SVGA3dCmdDXReadbackQuery {
473 SVGA3dQueryId queryId;
474}
475#include "vmware_pack_end.h"
476SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
477
478typedef
479#include "vmware_pack_begin.h"
480struct SVGA3dCmdDXMoveQuery {
481 SVGA3dQueryId queryId;
482 SVGAMobId mobid;
483 uint32 mobOffset;
484}
485#include "vmware_pack_end.h"
486SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
487
488typedef
489#include "vmware_pack_begin.h"
490struct SVGA3dCmdDXBindAllQuery {
491 uint32 cid;
492 SVGAMobId mobid;
493}
494#include "vmware_pack_end.h"
495SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
496
497typedef
498#include "vmware_pack_begin.h"
499struct SVGA3dCmdDXReadbackAllQuery {
500 uint32 cid;
501}
502#include "vmware_pack_end.h"
503SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
504
505typedef
506#include "vmware_pack_begin.h"
507struct SVGA3dCmdDXSetPredication {
508 SVGA3dQueryId queryId;
509 uint32 predicateValue;
510}
511#include "vmware_pack_end.h"
512SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
513
514typedef
515#include "vmware_pack_begin.h"
516struct MKS3dDXSOState {
517 uint32 offset; /* Starting offset */
518 uint32 intOffset; /* Internal offset */
519 uint32 vertexCount; /* Vertices written */
520 uint32 sizeInBytes; /* Max bytes to write */
521}
522#include "vmware_pack_end.h"
523SVGA3dDXSOState;
524
525/* Set the offset field to this value to append SO values to the buffer */
526#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
527
528typedef
529#include "vmware_pack_begin.h"
530struct SVGA3dSoTarget {
531 SVGA3dSurfaceId sid;
532 uint32 offset;
533 uint32 sizeInBytes;
534}
535#include "vmware_pack_end.h"
536SVGA3dSoTarget;
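/*
 * Illustrative: binding a stream-output buffer so that new vertices
 * are appended after what the device has already written, rather than
 * at a fixed offset (sid and size are driver state):
 *
 *   SVGA3dSoTarget target;
 *
 *   target.sid = so_buffer_sid;
 *   target.offset = SVGA3D_DX_SO_OFFSET_APPEND;
 *   target.sizeInBytes = so_buffer_size;
 */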
537
538typedef
539#include "vmware_pack_begin.h"
540struct SVGA3dCmdDXSetSOTargets {
541 uint32 pad0;
542 /* Followed by a variable number of SVGA3dSoTarget's. */
543}
544#include "vmware_pack_end.h"
545SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
546
547typedef
548#include "vmware_pack_begin.h"
549struct SVGA3dViewport
550{
551 float x;
552 float y;
553 float width;
554 float height;
555 float minDepth;
556 float maxDepth;
557}
558#include "vmware_pack_end.h"
559SVGA3dViewport;
560
561typedef
562#include "vmware_pack_begin.h"
563struct SVGA3dCmdDXSetViewports {
564 uint32 pad0;
565 /* Followed by a variable number of SVGA3dViewport's. */
566}
567#include "vmware_pack_end.h"
568SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
569
570#define SVGA3D_DX_MAX_VIEWPORTS 16
571
572typedef
573#include "vmware_pack_begin.h"
574struct SVGA3dCmdDXSetScissorRects {
575 uint32 pad0;
576 /* Followed by a variable number of SVGASignedRect's. */
577}
578#include "vmware_pack_end.h"
579SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
580
581#define SVGA3D_DX_MAX_SCISSORRECTS 16
582
583typedef
584#include "vmware_pack_begin.h"
585struct SVGA3dCmdDXClearRenderTargetView {
586 SVGA3dRenderTargetViewId renderTargetViewId;
587 SVGA3dRGBAFloat rgba;
588}
589#include "vmware_pack_end.h"
590SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
591
592typedef
593#include "vmware_pack_begin.h"
594struct SVGA3dCmdDXClearDepthStencilView {
595 uint16 flags;
596 uint16 stencil;
597 SVGA3dDepthStencilViewId depthStencilViewId;
598 float depth;
599}
600#include "vmware_pack_end.h"
601SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
602
603typedef
604#include "vmware_pack_begin.h"
605struct SVGA3dCmdDXPredCopyRegion {
606 SVGA3dSurfaceId dstSid;
607 uint32 dstSubResource;
608 SVGA3dSurfaceId srcSid;
609 uint32 srcSubResource;
610 SVGA3dCopyBox box;
611}
612#include "vmware_pack_end.h"
613SVGA3dCmdDXPredCopyRegion;
614/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
615
616typedef
617#include "vmware_pack_begin.h"
618struct SVGA3dCmdDXPredCopy {
619 SVGA3dSurfaceId dstSid;
620 SVGA3dSurfaceId srcSid;
621}
622#include "vmware_pack_end.h"
623SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
624
625typedef
626#include "vmware_pack_begin.h"
627struct SVGA3dCmdDXBufferCopy {
628 SVGA3dSurfaceId dest;
629 SVGA3dSurfaceId src;
630 uint32 destX;
631 uint32 srcX;
632 uint32 width;
633}
634#include "vmware_pack_end.h"
635SVGA3dCmdDXBufferCopy;
636/* SVGA_3D_CMD_DX_BUFFER_COPY */
637
638typedef uint32 SVGA3dDXStretchBltMode;
639#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
640#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
641
642typedef
643#include "vmware_pack_begin.h"
644struct SVGA3dCmdDXStretchBlt {
645 SVGA3dSurfaceId srcSid;
646 uint32 srcSubResource;
647 SVGA3dSurfaceId dstSid;
648 uint32 destSubResource;
649 SVGA3dBox boxSrc;
650 SVGA3dBox boxDest;
651 SVGA3dDXStretchBltMode mode;
652}
653#include "vmware_pack_end.h"
654SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
655
656typedef
657#include "vmware_pack_begin.h"
658struct SVGA3dCmdDXGenMips {
659 SVGA3dShaderResourceViewId shaderResourceViewId;
660}
661#include "vmware_pack_end.h"
662SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
663
664/*
665 * Defines a resource/DX surface. Resources share the surfaceId namespace.
666 *
667 */
668typedef
669#include "vmware_pack_begin.h"
670struct SVGA3dCmdDefineGBSurface_v2 {
671 uint32 sid;
672 SVGA3dSurfaceFlags surfaceFlags;
673 SVGA3dSurfaceFormat format;
674 uint32 numMipLevels;
675 uint32 multisampleCount;
676 SVGA3dTextureFilter autogenFilter;
677 SVGA3dSize size;
678 uint32 arraySize;
679 uint32 pad;
680}
681#include "vmware_pack_end.h"
682SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
683
684/*
685 * Update a subresource in a guest-backed resource.
686 * (Inform the device that the guest contents have been updated.)
687 */
688typedef
689#include "vmware_pack_begin.h"
690struct SVGA3dCmdDXUpdateSubResource {
691 SVGA3dSurfaceId sid;
692 uint32 subResource;
693 SVGA3dBox box;
694}
695#include "vmware_pack_end.h"
696SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
697
698/*
699 * Readback a subresource in a guest-backed resource.
700 * (Request the device to flush the dirty contents into the guest.)
701 */
702typedef
703#include "vmware_pack_begin.h"
704struct SVGA3dCmdDXReadbackSubResource {
705 SVGA3dSurfaceId sid;
706 uint32 subResource;
707}
708#include "vmware_pack_end.h"
709SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
710
711/*
712 * Invalidate a subresource in a guest-backed surface.
713 * (Notify the device that the contents can be lost.)
714 */
715typedef
716#include "vmware_pack_begin.h"
717struct SVGA3dCmdDXInvalidateSubResource {
718 SVGA3dSurfaceId sid;
719 uint32 subResource;
720}
721#include "vmware_pack_end.h"
722SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
723
724
725/*
726 * Raw byte-wise transfer from a buffer surface into another surface
727 * of the requested box.
728 */
729typedef
730#include "vmware_pack_begin.h"
731struct SVGA3dCmdDXTransferFromBuffer {
732 SVGA3dSurfaceId srcSid;
733 uint32 srcOffset;
734 uint32 srcPitch;
735 uint32 srcSlicePitch;
736 SVGA3dSurfaceId destSid;
737 uint32 destSubResource;
738 SVGA3dBox destBox;
739}
740#include "vmware_pack_end.h"
741SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
742
743
744/*
745 * Raw byte-wise transfer from a buffer surface into another surface
746 * of the requested box. Supported if SVGA3D_DEVCAP_DX is set.
747 * The context is implied from the command buffer header.
748 */
749typedef
750#include "vmware_pack_begin.h"
751struct SVGA3dCmdDXPredTransferFromBuffer {
752 SVGA3dSurfaceId srcSid;
753 uint32 srcOffset;
754 uint32 srcPitch;
755 uint32 srcSlicePitch;
756 SVGA3dSurfaceId destSid;
757 uint32 destSubResource;
758 SVGA3dBox destBox;
759}
760#include "vmware_pack_end.h"
761SVGA3dCmdDXPredTransferFromBuffer;
762/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
763
764
765typedef
766#include "vmware_pack_begin.h"
767struct SVGA3dCmdDXSurfaceCopyAndReadback {
768 SVGA3dSurfaceId srcSid;
769 SVGA3dSurfaceId destSid;
770 SVGA3dCopyBox box;
771}
772#include "vmware_pack_end.h"
773SVGA3dCmdDXSurfaceCopyAndReadback;
774/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
775
776
777typedef
778#include "vmware_pack_begin.h"
779struct {
780 union {
781 struct {
782 uint32 firstElement;
783 uint32 numElements;
784 uint32 pad0;
785 uint32 pad1;
786 } buffer;
787 struct {
788 uint32 mostDetailedMip;
789 uint32 firstArraySlice;
790 uint32 mipLevels;
791 uint32 arraySize;
792 } tex;
793 struct {
794 uint32 firstElement;
795 uint32 numElements;
796 uint32 flags;
797 uint32 pad0;
798 } bufferex;
799 };
800}
801#include "vmware_pack_end.h"
802SVGA3dShaderResourceViewDesc;
803
804typedef
805#include "vmware_pack_begin.h"
806struct {
807 SVGA3dSurfaceId sid;
808 SVGA3dSurfaceFormat format;
809 SVGA3dResourceType resourceDimension;
810 SVGA3dShaderResourceViewDesc desc;
811 uint32 pad;
812}
813#include "vmware_pack_end.h"
814SVGACOTableDXSRViewEntry;
815
816typedef
817#include "vmware_pack_begin.h"
818struct SVGA3dCmdDXDefineShaderResourceView {
819 SVGA3dShaderResourceViewId shaderResourceViewId;
820
821 SVGA3dSurfaceId sid;
822 SVGA3dSurfaceFormat format;
823 SVGA3dResourceType resourceDimension;
824
825 SVGA3dShaderResourceViewDesc desc;
826}
827#include "vmware_pack_end.h"
828SVGA3dCmdDXDefineShaderResourceView;
829/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
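/*
 * Illustrative: filling the command for a view covering a whole 2D
 * texture (sid, format and mip count are driver state; the values are
 * examples only):
 *
 *   cmd.shaderResourceViewId = srv_id;
 *   cmd.sid = sid;
 *   cmd.format = SVGA3D_A8R8G8B8;
 *   cmd.resourceDimension = SVGA3D_RESOURCE_TEXTURE2D;
 *   cmd.desc.tex.mostDetailedMip = 0;
 *   cmd.desc.tex.firstArraySlice = 0;
 *   cmd.desc.tex.mipLevels = num_mip_levels;
 *   cmd.desc.tex.arraySize = 1;
 */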
830
831typedef
832#include "vmware_pack_begin.h"
833struct SVGA3dCmdDXDestroyShaderResourceView {
834 SVGA3dShaderResourceViewId shaderResourceViewId;
835}
836#include "vmware_pack_end.h"
837SVGA3dCmdDXDestroyShaderResourceView;
838/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
839
840typedef
841#include "vmware_pack_begin.h"
842struct SVGA3dRenderTargetViewDesc {
843 union {
844 struct {
845 uint32 firstElement;
846 uint32 numElements;
847 } buffer;
848 struct {
849 uint32 mipSlice;
850 uint32 firstArraySlice;
851 uint32 arraySize;
852 } tex; /* 1d, 2d, cube */
853 struct {
854 uint32 mipSlice;
855 uint32 firstW;
856 uint32 wSize;
857 } tex3D;
858 };
859}
860#include "vmware_pack_end.h"
861SVGA3dRenderTargetViewDesc;
862
863typedef
864#include "vmware_pack_begin.h"
865struct {
866 SVGA3dSurfaceId sid;
867 SVGA3dSurfaceFormat format;
868 SVGA3dResourceType resourceDimension;
869 SVGA3dRenderTargetViewDesc desc;
870 uint32 pad[2];
871}
872#include "vmware_pack_end.h"
873SVGACOTableDXRTViewEntry;
874
875typedef
876#include "vmware_pack_begin.h"
877struct SVGA3dCmdDXDefineRenderTargetView {
878 SVGA3dRenderTargetViewId renderTargetViewId;
879
880 SVGA3dSurfaceId sid;
881 SVGA3dSurfaceFormat format;
882 SVGA3dResourceType resourceDimension;
883
884 SVGA3dRenderTargetViewDesc desc;
885}
886#include "vmware_pack_end.h"
887SVGA3dCmdDXDefineRenderTargetView;
888/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
889
890typedef
891#include "vmware_pack_begin.h"
892struct SVGA3dCmdDXDestroyRenderTargetView {
893 SVGA3dRenderTargetViewId renderTargetViewId;
894}
895#include "vmware_pack_end.h"
896SVGA3dCmdDXDestroyRenderTargetView;
897/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
898
899/*
900 */
901#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01
902#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
903#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03
904typedef uint8 SVGA3DCreateDSViewFlags;
905
906typedef
907#include "vmware_pack_begin.h"
908struct {
909 SVGA3dSurfaceId sid;
910 SVGA3dSurfaceFormat format;
911 SVGA3dResourceType resourceDimension;
912 uint32 mipSlice;
913 uint32 firstArraySlice;
914 uint32 arraySize;
915 SVGA3DCreateDSViewFlags flags;
916 uint8 pad0;
917 uint16 pad1;
918 uint32 pad2;
919}
920#include "vmware_pack_end.h"
921SVGACOTableDXDSViewEntry;
922
923typedef
924#include "vmware_pack_begin.h"
925struct SVGA3dCmdDXDefineDepthStencilView {
926 SVGA3dDepthStencilViewId depthStencilViewId;
927
928 SVGA3dSurfaceId sid;
929 SVGA3dSurfaceFormat format;
930 SVGA3dResourceType resourceDimension;
931 uint32 mipSlice;
932 uint32 firstArraySlice;
933 uint32 arraySize;
934 SVGA3DCreateDSViewFlags flags;
935 uint8 pad0;
936 uint16 pad1;
937}
938#include "vmware_pack_end.h"
939SVGA3dCmdDXDefineDepthStencilView;
940/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
941
942typedef
943#include "vmware_pack_begin.h"
944struct SVGA3dCmdDXDestroyDepthStencilView {
945 SVGA3dDepthStencilViewId depthStencilViewId;
946}
947#include "vmware_pack_end.h"
948SVGA3dCmdDXDestroyDepthStencilView;
949/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
950
951typedef
952#include "vmware_pack_begin.h"
953struct SVGA3dInputElementDesc {
954 uint32 inputSlot;
955 uint32 alignedByteOffset;
956 SVGA3dSurfaceFormat format;
957 SVGA3dInputClassification inputSlotClass;
958 uint32 instanceDataStepRate;
959 uint32 inputRegister;
960}
961#include "vmware_pack_end.h"
962SVGA3dInputElementDesc;
963
964typedef
965#include "vmware_pack_begin.h"
966struct {
967 /*
968 * XXX: How many of these can there be?
969 */
970 uint32 elid;
971 uint32 numDescs;
972 SVGA3dInputElementDesc desc[32];
973 uint32 pad[62];
974}
975#include "vmware_pack_end.h"
976SVGACOTableDXElementLayoutEntry;
977
978typedef
979#include "vmware_pack_begin.h"
980struct SVGA3dCmdDXDefineElementLayout {
981 SVGA3dElementLayoutId elementLayoutId;
982 /* Followed by a variable number of SVGA3dInputElementDesc structures. */
983}
984#include "vmware_pack_end.h"
985SVGA3dCmdDXDefineElementLayout;
986/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
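
Because the command is variable-length, a submitter has to size the body for the trailing descriptor array; a sketch under that assumption (helper name illustrative; the SVGA3dCmdHeader that precedes every command is not counted here):

	static inline uint32
	example_element_layout_cmd_size(uint32 num_descs)
	{
		/* Fixed part plus num_descs trailing descriptor entries. */
		return sizeof(SVGA3dCmdDXDefineElementLayout) +
		       num_descs * sizeof(SVGA3dInputElementDesc);
	}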
987
988typedef
989#include "vmware_pack_begin.h"
990struct SVGA3dCmdDXDestroyElementLayout {
991 SVGA3dElementLayoutId elementLayoutId;
992}
993#include "vmware_pack_end.h"
994SVGA3dCmdDXDestroyElementLayout;
995/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
996
997
998#define SVGA3D_DX_MAX_RENDER_TARGETS 8
999
1000typedef
1001#include "vmware_pack_begin.h"
1002struct SVGA3dDXBlendStatePerRT {
1003 uint8 blendEnable;
1004 uint8 srcBlend;
1005 uint8 destBlend;
1006 uint8 blendOp;
1007 uint8 srcBlendAlpha;
1008 uint8 destBlendAlpha;
1009 uint8 blendOpAlpha;
1010 uint8 renderTargetWriteMask;
1011 uint8 logicOpEnable;
1012 uint8 logicOp;
1013 uint16 pad0;
1014}
1015#include "vmware_pack_end.h"
1016SVGA3dDXBlendStatePerRT;
1017
1018typedef
1019#include "vmware_pack_begin.h"
1020struct {
1021 uint8 alphaToCoverageEnable;
1022 uint8 independentBlendEnable;
1023 uint16 pad0;
1024 SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
1025 uint32 pad1[7];
1026}
1027#include "vmware_pack_end.h"
1028SVGACOTableDXBlendStateEntry;
1029
1030/*
1031 */
1032typedef
1033#include "vmware_pack_begin.h"
1034struct SVGA3dCmdDXDefineBlendState {
1035 SVGA3dBlendStateId blendId;
1036 uint8 alphaToCoverageEnable;
1037 uint8 independentBlendEnable;
1038 uint16 pad0;
1039 SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
1040}
1041#include "vmware_pack_end.h"
1042SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
1043
1044typedef
1045#include "vmware_pack_begin.h"
1046struct SVGA3dCmdDXDestroyBlendState {
1047 SVGA3dBlendStateId blendId;
1048}
1049#include "vmware_pack_end.h"
1050SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
1051
1052typedef
1053#include "vmware_pack_begin.h"
1054struct {
1055 uint8 depthEnable;
1056 SVGA3dDepthWriteMask depthWriteMask;
1057 SVGA3dComparisonFunc depthFunc;
1058 uint8 stencilEnable;
1059 uint8 frontEnable;
1060 uint8 backEnable;
1061 uint8 stencilReadMask;
1062 uint8 stencilWriteMask;
1063
1064 uint8 frontStencilFailOp;
1065 uint8 frontStencilDepthFailOp;
1066 uint8 frontStencilPassOp;
1067 SVGA3dComparisonFunc frontStencilFunc;
1068
1069 uint8 backStencilFailOp;
1070 uint8 backStencilDepthFailOp;
1071 uint8 backStencilPassOp;
1072 SVGA3dComparisonFunc backStencilFunc;
1073}
1074#include "vmware_pack_end.h"
1075SVGACOTableDXDepthStencilEntry;
1076
1077/*
1078 */
1079typedef
1080#include "vmware_pack_begin.h"
1081struct SVGA3dCmdDXDefineDepthStencilState {
1082 SVGA3dDepthStencilStateId depthStencilId;
1083
1084 uint8 depthEnable;
1085 SVGA3dDepthWriteMask depthWriteMask;
1086 SVGA3dComparisonFunc depthFunc;
1087 uint8 stencilEnable;
1088 uint8 frontEnable;
1089 uint8 backEnable;
1090 uint8 stencilReadMask;
1091 uint8 stencilWriteMask;
1092
1093 uint8 frontStencilFailOp;
1094 uint8 frontStencilDepthFailOp;
1095 uint8 frontStencilPassOp;
1096 SVGA3dComparisonFunc frontStencilFunc;
1097
1098 uint8 backStencilFailOp;
1099 uint8 backStencilDepthFailOp;
1100 uint8 backStencilPassOp;
1101 SVGA3dComparisonFunc backStencilFunc;
1102}
1103#include "vmware_pack_end.h"
1104SVGA3dCmdDXDefineDepthStencilState;
1105/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
1106
1107typedef
1108#include "vmware_pack_begin.h"
1109struct SVGA3dCmdDXDestroyDepthStencilState {
1110 SVGA3dDepthStencilStateId depthStencilId;
1111}
1112#include "vmware_pack_end.h"
1113SVGA3dCmdDXDestroyDepthStencilState;
1114/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
1115
1116typedef
1117#include "vmware_pack_begin.h"
1118struct {
1119 uint8 fillMode;
1120 SVGA3dCullMode cullMode;
1121 uint8 frontCounterClockwise;
1122 uint8 provokingVertexLast;
1123 int32 depthBias;
1124 float depthBiasClamp;
1125 float slopeScaledDepthBias;
1126 uint8 depthClipEnable;
1127 uint8 scissorEnable;
1128 uint8 multisampleEnable;
1129 uint8 antialiasedLineEnable;
1130 float lineWidth;
1131 uint8 lineStippleEnable;
1132 uint8 lineStippleFactor;
1133 uint16 lineStipplePattern;
1134 uint32 forcedSampleCount;
1135}
1136#include "vmware_pack_end.h"
1137SVGACOTableDXRasterizerStateEntry;
1138
1139/*
1140 */
1141typedef
1142#include "vmware_pack_begin.h"
1143struct SVGA3dCmdDXDefineRasterizerState {
1144 SVGA3dRasterizerStateId rasterizerId;
1145
1146 uint8 fillMode;
1147 SVGA3dCullMode cullMode;
1148 uint8 frontCounterClockwise;
1149 uint8 provokingVertexLast;
1150 int32 depthBias;
1151 float depthBiasClamp;
1152 float slopeScaledDepthBias;
1153 uint8 depthClipEnable;
1154 uint8 scissorEnable;
1155 uint8 multisampleEnable;
1156 uint8 antialiasedLineEnable;
1157 float lineWidth;
1158 uint8 lineStippleEnable;
1159 uint8 lineStippleFactor;
1160 uint16 lineStipplePattern;
1161}
1162#include "vmware_pack_end.h"
1163SVGA3dCmdDXDefineRasterizerState;
1164/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
1165
1166typedef
1167#include "vmware_pack_begin.h"
1168struct SVGA3dCmdDXDestroyRasterizerState {
1169 SVGA3dRasterizerStateId rasterizerId;
1170}
1171#include "vmware_pack_end.h"
1172SVGA3dCmdDXDestroyRasterizerState;
1173/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
1174
1175typedef
1176#include "vmware_pack_begin.h"
1177struct {
1178 SVGA3dFilter filter;
1179 uint8 addressU;
1180 uint8 addressV;
1181 uint8 addressW;
1182 uint8 pad0;
1183 float mipLODBias;
1184 uint8 maxAnisotropy;
1185 SVGA3dComparisonFunc comparisonFunc;
1186 uint16 pad1;
1187 SVGA3dRGBAFloat borderColor;
1188 float minLOD;
1189 float maxLOD;
1190 uint32 pad2[6];
1191}
1192#include "vmware_pack_end.h"
1193SVGACOTableDXSamplerEntry;
1194
1195/*
1196 */
1197typedef
1198#include "vmware_pack_begin.h"
1199struct SVGA3dCmdDXDefineSamplerState {
1200 SVGA3dSamplerId samplerId;
1201 SVGA3dFilter filter;
1202 uint8 addressU;
1203 uint8 addressV;
1204 uint8 addressW;
1205 uint8 pad0;
1206 float mipLODBias;
1207 uint8 maxAnisotropy;
1208 SVGA3dComparisonFunc comparisonFunc;
1209 uint16 pad1;
1210 SVGA3dRGBAFloat borderColor;
1211 float minLOD;
1212 float maxLOD;
1213}
1214#include "vmware_pack_end.h"
1215SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
1216
1217typedef
1218#include "vmware_pack_begin.h"
1219struct SVGA3dCmdDXDestroySamplerState {
1220 SVGA3dSamplerId samplerId;
1221}
1222#include "vmware_pack_end.h"
1223SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
1224
1225/*
1226 */
1227typedef
1228#include "vmware_pack_begin.h"
1229struct SVGA3dSignatureEntry {
1230 uint8 systemValue;
1231 uint8 reg; /* register is a reserved word */
1232 uint16 mask;
1233 uint8 registerComponentType;
1234 uint8 minPrecision;
1235 uint16 pad0;
1236}
1237#include "vmware_pack_end.h"
1238SVGA3dSignatureEntry;
1239
1240typedef
1241#include "vmware_pack_begin.h"
1242struct SVGA3dCmdDXDefineShader {
1243 SVGA3dShaderId shaderId;
1244 SVGA3dShaderType type;
1245 uint32 sizeInBytes; /* Number of bytes of shader text. */
1246}
1247#include "vmware_pack_end.h"
1248SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
1249
1250typedef
1251#include "vmware_pack_begin.h"
1252struct SVGACOTableDXShaderEntry {
1253 SVGA3dShaderType type;
1254 uint32 sizeInBytes;
1255 uint32 offsetInBytes;
1256 SVGAMobId mobid;
1257 uint32 numInputSignatureEntries;
1258 uint32 numOutputSignatureEntries;
1259
1260 uint32 numPatchConstantSignatureEntries;
1261
1262 uint32 pad;
1263}
1264#include "vmware_pack_end.h"
1265SVGACOTableDXShaderEntry;
1266
1267typedef
1268#include "vmware_pack_begin.h"
1269struct SVGA3dCmdDXDestroyShader {
1270 SVGA3dShaderId shaderId;
1271}
1272#include "vmware_pack_end.h"
1273SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
1274
1275typedef
1276#include "vmware_pack_begin.h"
1277struct SVGA3dCmdDXBindShader {
1278 uint32 cid;
1279 uint32 shid;
1280 SVGAMobId mobid;
1281 uint32 offsetInBytes;
1282}
1283#include "vmware_pack_end.h"
1284SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
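
Taken together, a guest first defines the shader object and then backs it with bytecode held in a mob; a hedged sketch of the two command bodies (SVGA3D_SHADERTYPE_VS is assumed from SVGA3dShaderType; how the bodies reach the device is driver plumbing and omitted):

	static inline void
	example_define_and_bind_shader(uint32 context_id, uint32 shader_id,
				       SVGAMobId bytecode_mob, uint32 bytecode_size)
	{
		SVGA3dCmdDXDefineShader def = {
			.shaderId    = shader_id,
			.type        = SVGA3D_SHADERTYPE_VS,	/* assumed enum value */
			.sizeInBytes = bytecode_size,
		};
		SVGA3dCmdDXBindShader bind = {
			.cid           = context_id,
			.shid          = shader_id,
			.mobid         = bytecode_mob,
			.offsetInBytes = 0,	/* bytecode starts at mob offset 0 */
		};

		(void)def;	/* submission plumbing intentionally not shown */
		(void)bind;
	}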
1285
1286/*
1287 * The maximum number of streamout declarations in each streamout entry.
1288 */
1289#define SVGA3D_MAX_STREAMOUT_DECLS 64
1290
1291typedef
1292#include "vmware_pack_begin.h"
1293struct SVGA3dStreamOutputDeclarationEntry {
1294 uint32 outputSlot;
1295 uint32 registerIndex;
1296 uint8 registerMask;
1297 uint8 pad0;
1298 uint16 pad1;
1299 uint32 stream;
1300}
1301#include "vmware_pack_end.h"
1302SVGA3dStreamOutputDeclarationEntry;
1303
1304typedef
1305#include "vmware_pack_begin.h"
1306struct SVGACOTableDXStreamOutputEntry {
1307 uint32 numOutputStreamEntries;
1308 SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
1309 uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
1310 uint32 rasterizedStream;
1311 uint32 pad[250];
1312}
1313#include "vmware_pack_end.h"
1314SVGACOTableDXStreamOutputEntry;
1315
1316typedef
1317#include "vmware_pack_begin.h"
1318struct SVGA3dCmdDXDefineStreamOutput {
1319 SVGA3dStreamOutputId soid;
1320 uint32 numOutputStreamEntries;
1321 SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
1322 uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
1323 uint32 rasterizedStream;
1324}
1325#include "vmware_pack_end.h"
1326SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
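
For reference, a single decl[] entry that streams a full four-component output register to the first buffer might be filled like this (a sketch; all values illustrative):

	static inline SVGA3dStreamOutputDeclarationEntry
	example_so_decl(void)
	{
		SVGA3dStreamOutputDeclarationEntry decl = {
			.outputSlot    = 0,	/* first stream-output buffer */
			.registerIndex = 0,	/* shader output register 0 */
			.registerMask  = 0xf,	/* write all four components */
			.stream        = 0,
		};
		return decl;
	}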
1327
1328typedef
1329#include "vmware_pack_begin.h"
1330struct SVGA3dCmdDXDestroyStreamOutput {
1331 SVGA3dStreamOutputId soid;
1332}
1333#include "vmware_pack_end.h"
1334SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
1335
1336typedef
1337#include "vmware_pack_begin.h"
1338struct SVGA3dCmdDXSetStreamOutput {
1339 SVGA3dStreamOutputId soid;
1340}
1341#include "vmware_pack_end.h"
1342SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
1343
1344typedef
1345#include "vmware_pack_begin.h"
1346struct {
1347 uint64 value;
1348 uint32 mobId;
1349 uint32 mobOffset;
1350}
1351#include "vmware_pack_end.h"
1352SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
1353
1354/*
1355 * SVGA3dCmdDXSetCOTable --
1356 *
1357 * This command allows the guest to bind a mob to a context-object table.
1358 */
1359
1360typedef
1361#include "vmware_pack_begin.h"
1362struct SVGA3dCmdDXSetCOTable {
1363 uint32 cid;
1364 uint32 mobid;
1365 SVGACOTableType type;
1366 uint32 validSizeInBytes;
1367}
1368#include "vmware_pack_end.h"
1369SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
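
A hedged sketch of filling the body, e.g. to back the blend-state table of a context with a fresh mob (SVGA_COTABLE_BLENDSTATE is assumed from SVGACOTableType, and validSizeInBytes = 0 assumes no entries are valid yet):

	static inline SVGA3dCmdDXSetCOTable
	example_set_cotable_body(uint32 context_id, uint32 table_mob)
	{
		SVGA3dCmdDXSetCOTable body = {
			.cid              = context_id,
			.mobid            = table_mob,
			.type             = SVGA_COTABLE_BLENDSTATE,	/* assumed value */
			.validSizeInBytes = 0,
		};
		return body;
	}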
1370
1371typedef
1372#include "vmware_pack_begin.h"
1373struct SVGA3dCmdDXReadbackCOTable {
1374 uint32 cid;
1375 SVGACOTableType type;
1376}
1377#include "vmware_pack_end.h"
1378SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
1379
1380typedef
1381#include "vmware_pack_begin.h"
1382struct SVGA3dCOTableData {
1383 uint32 mobid;
1384}
1385#include "vmware_pack_end.h"
1386SVGA3dCOTableData;
1387
1388typedef
1389#include "vmware_pack_begin.h"
1390struct SVGA3dBufferBinding {
1391 uint32 bufferId;
1392 uint32 stride;
1393 uint32 offset;
1394}
1395#include "vmware_pack_end.h"
1396SVGA3dBufferBinding;
1397
1398typedef
1399#include "vmware_pack_begin.h"
1400struct SVGA3dConstantBufferBinding {
1401 uint32 sid;
1402 uint32 offsetInBytes;
1403 uint32 sizeInBytes;
1404}
1405#include "vmware_pack_end.h"
1406SVGA3dConstantBufferBinding;
1407
1408typedef
1409#include "vmware_pack_begin.h"
1410struct SVGADXInputAssemblyMobFormat {
1411 uint32 layoutId;
1412 SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
1413 uint32 indexBufferSid;
1414 uint32 pad;
1415 uint32 indexBufferOffset;
1416 uint32 indexBufferFormat;
1417 uint32 topology;
1418}
1419#include "vmware_pack_end.h"
1420SVGADXInputAssemblyMobFormat;
1421
1422typedef
1423#include "vmware_pack_begin.h"
1424struct SVGADXContextMobFormat {
1425 SVGADXInputAssemblyMobFormat inputAssembly;
1426
1427 struct {
1428 uint32 blendStateId;
1429 uint32 blendFactor[4];
1430 uint32 sampleMask;
1431 uint32 depthStencilStateId;
1432 uint32 stencilRef;
1433 uint32 rasterizerStateId;
1434 uint32 depthStencilViewId;
1435 uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
1436 uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
1437 } renderState;
1438
1439 struct {
1440 uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
1441 uint32 soid;
1442 } streamOut;
1443 uint32 pad0[11];
1444
1445 uint8 numViewports;
1446 uint8 numScissorRects;
1447 uint16 pad1[1];
1448
1449 uint32 pad2[3];
1450
1451 SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
1452 uint32 pad3[32];
1453
1454 SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
1455 uint32 pad4[64];
1456
1457 struct {
1458 uint32 queryID;
1459 uint32 value;
1460 } predication;
1461 uint32 pad5[2];
1462
1463 struct {
1464 uint32 shaderId;
1465 SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
1466 uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
1467 uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
1468 } shaderState[SVGA3D_NUM_SHADERTYPE];
1469 uint32 pad6[26];
1470
1471 SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
1472
1473 SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
1474 uint32 pad7[381];
1475}
1476#include "vmware_pack_end.h"
1477SVGADXContextMobFormat;
1478
1479typedef
1480#include "vmware_pack_begin.h"
1481struct SVGA3dCmdDXTempSetContext {
1482 uint32 dxcid;
1483}
1484#include "vmware_pack_end.h"
1485SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
1486
1487#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 000000000000..a1c36877ad55
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
1/**********************************************************
2 * Copyright 2007-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_limits.h --
28 *
29 * SVGA 3d hardware limits
30 */
31
32#ifndef _SVGA3D_LIMITS_H_
33#define _SVGA3D_LIMITS_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41#define SVGA3D_NUM_CLIPPLANES 6
42#define SVGA3D_MAX_RENDER_TARGETS 8
43#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
44#define SVGA3D_MAX_UAVIEWS 8
45#define SVGA3D_MAX_CONTEXT_IDS 256
46#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
47
48/*
49 * Maximum ID a shader can be assigned on a given context.
50 */
51#define SVGA3D_MAX_SHADERIDS 5000
52/*
53 * Maximum number of shaders of a given type that can be defined
54 * (including all contexts).
55 */
56#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000
57
58#define SVGA3D_NUM_TEXTURE_UNITS 32
59#define SVGA3D_NUM_LIGHTS 8
60
61/*
62 * Maximum size in dwords of shader text the SVGA device will allow.
63 * Currently 8 MB of shader text, i.e. 2M dwords.
64 */
65#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
66
67#define SVGA3D_MAX_CLIP_PLANES 6
68
69/*
70 * This is the limit to the number of fixed-function texture
71 * transforms and texture coordinates we can support. It does *not*
72 * correspond to the number of texture image units (samplers) we
73 * support!
74 */
75#define SVGA3D_MAX_TEXTURE_COORDS 8
76
77/*
78 * Number of faces in a cubemap.
79 */
80#define SVGA3D_MAX_SURFACE_FACES 6
81
82/*
83 * Maximum number of array indexes in a GB surface (with DX enabled).
84 */
85#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
86
87/*
88 * The maximum number of vertex arrays we're guaranteed to support in
89 * SVGA_3D_CMD_DRAWPRIMITIVES.
90 */
91#define SVGA3D_MAX_VERTEX_ARRAYS 32
92
93/*
94 * The maximum number of primitive ranges we're guaranteed to support
95 * in SVGA_3D_CMD_DRAWPRIMITIVES.
96 */
97#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
98
99#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 000000000000..b44ce648f592
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
1/**********************************************************
2 * Copyright 1998-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3d hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41#include "svga_reg.h"
42
43#include "svga3d_types.h"
44#include "svga3d_limits.h"
45#include "svga3d_cmd.h"
46#include "svga3d_dx.h"
47#include "svga3d_devcaps.h"
48
49
50#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..58704f0a4607
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))
41#define surf_size_struct SVGA3dSize
42#define u32 uint32
43
44#endif /* __KERNEL__ */
45
46#include "svga3d_reg.h"
47
48/*
49 * enum svga3d_block_desc describes the active data channels in a block.
50 *
51 * There can be at most four active channels in a block:
52 * 1. Blue and bump U are stored in the first channel.
53 * 2. Green, bump V and stencil are stored in the second channel.
54 * 3. Red, bump W, luminance and depth are stored in the third channel.
55 * 4. Alpha and bump Q are stored in the fourth channel.
56 *
57 * Block channels can be used to store compressed and buffer data:
58 * 1. For compressed formats, only the data channel is used and its size
59 * is equal to that of a single block in the compression scheme.
60 * 2. For buffer formats, only the data channel is used and its size is
61 * exactly one byte in length.
62 * 3. In each case the bit depth represents the size of a single block.
63 *
64 * Note: Compressed and IEEE formats do not use the bitMask structure.
65 */
66
67enum svga3d_block_desc {
68 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
69 SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
70 data */
71 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
72 data */
73 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
74 U and V */
75 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
76 data */
77 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
78 data */
79 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
80 channel */
81 SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
82 data */
83 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
84 data */
85 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
86 data */
87 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
88 data */
89 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
90 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
91 channel */
92 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
93 data */
94 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
95 data */
96 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
97 data depending on the
98 compression method used */
99 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
100 floating point
101 representation in
102 all channels */
103 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
104 data. */
105 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
106 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
107 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
108 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
109 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
110 e.g., NV12. */
111 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
112 Y, U, V, e.g., YV12. */
113
114 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
115 SVGA3DBLOCKDESC_GREEN,
116 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
117 SVGA3DBLOCKDESC_BLUE,
118 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
119 SVGA3DBLOCKDESC_SRGB,
120 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
121 SVGA3DBLOCKDESC_ALPHA,
122 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
123 SVGA3DBLOCKDESC_SRGB,
124 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
125 SVGA3DBLOCKDESC_V,
126 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
127 SVGA3DBLOCKDESC_LUMINANCE,
128 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
129 SVGA3DBLOCKDESC_W,
130 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
131 SVGA3DBLOCKDESC_ALPHA,
132 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
133 SVGA3DBLOCKDESC_V |
134 SVGA3DBLOCKDESC_W |
135 SVGA3DBLOCKDESC_Q,
136 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
137 SVGA3DBLOCKDESC_ALPHA,
138 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
139 SVGA3DBLOCKDESC_IEEE_FP,
140 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
141 SVGA3DBLOCKDESC_GREEN,
142 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
143 SVGA3DBLOCKDESC_BLUE,
144 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
145 SVGA3DBLOCKDESC_ALPHA,
146 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
147 SVGA3DBLOCKDESC_STENCIL,
148 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
149 SVGA3DBLOCKDESC_Y,
150 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
151 SVGA3DBLOCKDESC_Y |
152 SVGA3DBLOCKDESC_U_VIDEO |
153 SVGA3DBLOCKDESC_V_VIDEO,
154 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
155 SVGA3DBLOCKDESC_EXP,
156 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
157 SVGA3DBLOCKDESC_SRGB,
158 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
159 SVGA3DBLOCKDESC_2PLANAR_YUV,
160 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
161 SVGA3DBLOCKDESC_3PLANAR_YUV,
162};
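
Since the composite values above are plain bit unions, membership reduces to a mask check; an illustrative helper (not part of the header):

	static inline bool
	example_block_has_depth_stencil(enum svga3d_block_desc bd)
	{
		/* True only when both the depth and stencil bits are set. */
		return (bd & SVGA3DBLOCKDESC_DS) == SVGA3DBLOCKDESC_DS;
	}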
163
164/*
165 * SVGA3dSurfaceDesc describes the actual pixel data.
166 *
167 * This structure provides the following information:
168 * 1. Block description.
169 * 2. Dimensions of a block in the surface.
170 * 3. Size of block in bytes.
171 * 4. Bit depth of the pixel data.
172 * 5. Channel bit depths and masks (if applicable).
173 */
174struct svga3d_channel_def {
175 union {
176 u8 blue;
177 u8 u;
178 u8 uv_video;
179 u8 u_video;
180 };
181 union {
182 u8 green;
183 u8 v;
184 u8 stencil;
185 u8 v_video;
186 };
187 union {
188 u8 red;
189 u8 w;
190 u8 luminance;
191 u8 y;
192 u8 depth;
193 u8 data;
194 };
195 union {
196 u8 alpha;
197 u8 q;
198 u8 exp;
199 };
200};
201
202struct svga3d_surface_desc {
203 SVGA3dSurfaceFormat format;
204 enum svga3d_block_desc block_desc;
205 surf_size_struct block_size;
206 u32 bytes_per_block;
207 u32 pitch_bytes_per_block;
208
209 u32 total_bit_depth;
210 struct svga3d_channel_def bit_depth;
211 struct svga3d_channel_def bit_offset;
212};
213
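Given a descriptor, the per-channel depth/offset pairs suffice to pick one channel out of a packed pixel; a minimal sketch for formats whose pixel fits in 32 bits (helper name illustrative):

	static inline u32
	example_extract_channel(u32 pixel, u8 depth, u8 offset)
	{
		u32 mask = (depth >= 32) ? 0xffffffffu : (1u << depth) - 1;

		return (pixel >> offset) & mask;
	}
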
214static const struct svga3d_surface_desc svga3d_surface_descs[] = {
215 {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
216 {1, 1, 1}, 0, 0,
217 0, {{0}, {0}, {0}, {0}},
218 {{0}, {0}, {0}, {0}}},
219
220 {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
221 {1, 1, 1}, 4, 4,
222 24, {{8}, {8}, {8}, {0}},
223 {{0}, {8}, {16}, {24}}},
224
225 {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
226 {1, 1, 1}, 4, 4,
227 32, {{8}, {8}, {8}, {8}},
228 {{0}, {8}, {16}, {24}}},
229
230 {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
231 {1, 1, 1}, 2, 2,
232 16, {{5}, {6}, {5}, {0}},
233 {{0}, {5}, {11}, {0}}},
234
235 {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
236 {1, 1, 1}, 2, 2,
237 15, {{5}, {5}, {5}, {0}},
238 {{0}, {5}, {10}, {0}}},
239
240 {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
241 {1, 1, 1}, 2, 2,
242 16, {{5}, {5}, {5}, {1}},
243 {{0}, {5}, {10}, {15}}},
244
245 {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
246 {1, 1, 1}, 2, 2,
247 16, {{4}, {4}, {4}, {4}},
248 {{0}, {4}, {8}, {12}}},
249
250 {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
251 {1, 1, 1}, 4, 4,
252 32, {{0}, {0}, {32}, {0}},
253 {{0}, {0}, {0}, {0}}},
254
255 {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
256 {1, 1, 1}, 2, 2,
257 16, {{0}, {0}, {16}, {0}},
258 {{0}, {0}, {0}, {0}}},
259
260 {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
261 {1, 1, 1}, 4, 4,
262 32, {{0}, {8}, {24}, {0}},
263 {{0}, {24}, {0}, {0}}},
264
265 {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
266 {1, 1, 1}, 2, 2,
267 16, {{0}, {1}, {15}, {0}},
268 {{0}, {15}, {0}, {0}}},
269
270 {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
271 {1, 1, 1}, 1, 1,
272 8, {{0}, {0}, {8}, {0}},
273 {{0}, {0}, {0}, {0}}},
274
275 {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
276 {1, 1, 1}, 1, 1,
277 8, {{0}, {0}, {4}, {4}},
278 {{0}, {0}, {0}, {4}}},
279
280 {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
281 {1, 1, 1}, 2, 2,
282 16, {{0}, {0}, {16}, {0}},
283 {{0}, {0}, {0}, {0}}},
284
285 {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
286 {1, 1, 1}, 2, 2,
287 16, {{0}, {0}, {8}, {8}},
288 {{0}, {0}, {0}, {8}}},
289
290 {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
291 {4, 4, 1}, 8, 8,
292 64, {{0}, {0}, {64}, {0}},
293 {{0}, {0}, {0}, {0}}},
294
295 {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
296 {4, 4, 1}, 16, 16,
297 128, {{0}, {0}, {128}, {0}},
298 {{0}, {0}, {0}, {0}}},
299
300 {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
301 {4, 4, 1}, 16, 16,
302 128, {{0}, {0}, {128}, {0}},
303 {{0}, {0}, {0}, {0}}},
304
305 {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
306 {4, 4, 1}, 16, 16,
307 128, {{0}, {0}, {128}, {0}},
308 {{0}, {0}, {0}, {0}}},
309
310 {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
311 {4, 4, 1}, 16, 16,
312 128, {{0}, {0}, {128}, {0}},
313 {{0}, {0}, {0}, {0}}},
314
315 {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
316 {1, 1, 1}, 2, 2,
317 16, {{0}, {0}, {8}, {8}},
318 {{0}, {0}, {0}, {8}}},
319
320 {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
321 {1, 1, 1}, 2, 2,
322 16, {{5}, {5}, {6}, {0}},
323 {{11}, {6}, {0}, {0}}},
324
325 {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
326 {1, 1, 1}, 4, 4,
327 32, {{8}, {8}, {8}, {0}},
328 {{16}, {8}, {0}, {0}}},
329
330 {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
331 {1, 1, 1}, 3, 3,
332 24, {{8}, {8}, {8}, {0}},
333 {{16}, {8}, {0}, {0}}},
334
335 {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
336 {1, 1, 1}, 8, 8,
337 64, {{16}, {16}, {16}, {16}},
338 {{32}, {16}, {0}, {48}}},
339
340 {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
341 {1, 1, 1}, 16, 16,
342 128, {{32}, {32}, {32}, {32}},
343 {{64}, {32}, {0}, {96}}},
344
345 {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
346 {1, 1, 1}, 4, 4,
347 32, {{10}, {10}, {10}, {2}},
348 {{0}, {10}, {20}, {30}}},
349
350 {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
351 {1, 1, 1}, 2, 2,
352 16, {{8}, {8}, {0}, {0}},
353 {{8}, {0}, {0}, {0}}},
354
355 {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
356 {1, 1, 1}, 4, 4,
357 32, {{8}, {8}, {8}, {8}},
358 {{24}, {16}, {8}, {0}}},
359
360 {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
361 {1, 1, 1}, 2, 2,
362 16, {{8}, {8}, {0}, {0}},
363 {{8}, {0}, {0}, {0}}},
364
365 {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
366 {1, 1, 1}, 4, 4,
367 24, {{8}, {8}, {8}, {0}},
368 {{16}, {8}, {0}, {0}}},
369
370 {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
371 {1, 1, 1}, 4, 4,
372 32, {{10}, {10}, {10}, {2}},
373 {{0}, {10}, {20}, {30}}},
374
375 {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
376 {1, 1, 1}, 1, 1,
377 8, {{0}, {0}, {0}, {8}},
378 {{0}, {0}, {0}, {0}}},
379
380 {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
381 {1, 1, 1}, 2, 2,
382 16, {{0}, {0}, {16}, {0}},
383 {{0}, {0}, {0}, {0}}},
384
385 {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
386 {1, 1, 1}, 4, 4,
387 32, {{0}, {0}, {32}, {0}},
388 {{0}, {0}, {0}, {0}}},
389
390 {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
391 {1, 1, 1}, 4, 4,
392 32, {{0}, {16}, {16}, {0}},
393 {{0}, {16}, {0}, {0}}},
394
395 {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
396 {1, 1, 1}, 8, 8,
397 64, {{0}, {32}, {32}, {0}},
398 {{0}, {32}, {0}, {0}}},
399
400 {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
401 {1, 1, 1}, 1, 1,
402 8, {{0}, {0}, {8}, {0}},
403 {{0}, {0}, {0}, {0}}},
404
405 {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
406 {1, 1, 1}, 4, 4,
407 32, {{0}, {0}, {24}, {0}},
408 {{0}, {24}, {0}, {0}}},
409
410 {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
411 {1, 1, 1}, 4, 4,
412 32, {{16}, {16}, {0}, {0}},
413 {{16}, {0}, {0}, {0}}},
414
415 {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
416 {1, 1, 1}, 4, 4,
417 32, {{0}, {16}, {16}, {0}},
418 {{0}, {0}, {16}, {0}}},
419
420 {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
421 {1, 1, 1}, 8, 8,
422 64, {{16}, {16}, {16}, {16}},
423 {{32}, {16}, {0}, {48}}},
424
425 {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
426 {1, 1, 1}, 2, 2,
427 16, {{8}, {0}, {8}, {0}},
428 {{0}, {0}, {8}, {0}}},
429
430 {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
431 {1, 1, 1}, 2, 2,
432 16, {{8}, {0}, {8}, {0}},
433 {{8}, {0}, {0}, {0}}},
434
435 {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
436 {2, 2, 1}, 6, 2,
437 48, {{0}, {0}, {48}, {0}},
438 {{0}, {0}, {0}, {0}}},
439
440 {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
441 {1, 1, 1}, 4, 4,
442 32, {{8}, {8}, {8}, {8}},
443 {{0}, {8}, {16}, {24}}},
444
445 {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
446 {1, 1, 1}, 16, 16,
447 128, {{32}, {32}, {32}, {32}},
448 {{64}, {32}, {0}, {96}}},
449
450 {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
451 {1, 1, 1}, 16, 16,
452 128, {{32}, {32}, {32}, {32}},
453 {{64}, {32}, {0}, {96}}},
454
455 {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
456 {1, 1, 1}, 16, 16,
457 128, {{32}, {32}, {32}, {32}},
458 {{64}, {32}, {0}, {96}}},
459
460 {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
461 {1, 1, 1}, 12, 12,
462 96, {{32}, {32}, {32}, {0}},
463 {{64}, {32}, {0}, {0}}},
464
465 {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
466 {1, 1, 1}, 12, 12,
467 96, {{32}, {32}, {32}, {0}},
468 {{64}, {32}, {0}, {0}}},
469
470 {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
471 {1, 1, 1}, 12, 12,
472 96, {{32}, {32}, {32}, {0}},
473 {{64}, {32}, {0}, {0}}},
474
475 {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
476 {1, 1, 1}, 12, 12,
477 96, {{32}, {32}, {32}, {0}},
478 {{64}, {32}, {0}, {0}}},
479
480 {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
481 {1, 1, 1}, 8, 8,
482 64, {{16}, {16}, {16}, {16}},
483 {{32}, {16}, {0}, {48}}},
484
485 {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
486 {1, 1, 1}, 8, 8,
487 64, {{16}, {16}, {16}, {16}},
488 {{32}, {16}, {0}, {48}}},
489
490 {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
491 {1, 1, 1}, 8, 8,
492 64, {{16}, {16}, {16}, {16}},
493 {{32}, {16}, {0}, {48}}},
494
495 {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
496 {1, 1, 1}, 8, 8,
497 64, {{16}, {16}, {16}, {16}},
498 {{32}, {16}, {0}, {48}}},
499
500 {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
501 {1, 1, 1}, 8, 8,
502 64, {{0}, {32}, {32}, {0}},
503 {{0}, {32}, {0}, {0}}},
504
505 {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
506 {1, 1, 1}, 8, 8,
507 64, {{0}, {32}, {32}, {0}},
508 {{0}, {32}, {0}, {0}}},
509
510 {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
511 {1, 1, 1}, 8, 8,
512 64, {{0}, {32}, {32}, {0}},
513 {{0}, {32}, {0}, {0}}},
514
515 {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
516 {1, 1, 1}, 8, 8,
517 64, {{0}, {8}, {32}, {0}},
518 {{0}, {32}, {0}, {0}}},
519
520 {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
521 {1, 1, 1}, 8, 8,
522 64, {{0}, {8}, {32}, {0}},
523 {{0}, {32}, {0}, {0}}},
524
525 {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
526 {1, 1, 1}, 8, 8,
527 64, {{0}, {0}, {32}, {0}},
528 {{0}, {0}, {0}, {0}}},
529
530 {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
531 {1, 1, 1}, 8, 8,
532 64, {{0}, {8}, {0}, {0}},
533 {{0}, {32}, {0}, {0}}},
534
535 {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
536 {1, 1, 1}, 4, 4,
537 32, {{10}, {10}, {10}, {2}},
538 {{0}, {10}, {20}, {30}}},
539
540 {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
541 {1, 1, 1}, 4, 4,
542 32, {{10}, {10}, {10}, {2}},
543 {{0}, {10}, {20}, {30}}},
544
545 {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
546 {1, 1, 1}, 4, 4,
547 32, {{10}, {11}, {11}, {0}},
548 {{0}, {10}, {21}, {0}}},
549
550 {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
551 {1, 1, 1}, 4, 4,
552 32, {{8}, {8}, {8}, {8}},
553 {{16}, {8}, {0}, {24}}},
554
555 {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
556 {1, 1, 1}, 4, 4,
557 32, {{8}, {8}, {8}, {8}},
558 {{16}, {8}, {0}, {24}}},
559
560 {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
561 {1, 1, 1}, 4, 4,
562 32, {{8}, {8}, {8}, {8}},
563 {{16}, {8}, {0}, {24}}},
564
565 {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
566 {1, 1, 1}, 4, 4,
567 32, {{8}, {8}, {8}, {8}},
568 {{16}, {8}, {0}, {24}}},
569
570 {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
571 {1, 1, 1}, 4, 4,
572 32, {{8}, {8}, {8}, {8}},
573 {{16}, {8}, {0}, {24}}},
574
575 {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
576 {1, 1, 1}, 4, 4,
577 32, {{0}, {16}, {16}, {0}},
578 {{0}, {16}, {0}, {0}}},
579
580 {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
581 {1, 1, 1}, 4, 4,
582 32, {{0}, {16}, {16}, {0}},
583 {{0}, {16}, {0}, {0}}},
584
585 {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
586 {1, 1, 1}, 4, 4,
587 32, {{0}, {16}, {16}, {0}},
588 {{0}, {16}, {0}, {0}}},
589
590 {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
591 {1, 1, 1}, 4, 4,
592 32, {{0}, {0}, {32}, {0}},
593 {{0}, {0}, {0}, {0}}},
594
595 {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
596 {1, 1, 1}, 4, 4,
597 32, {{0}, {0}, {32}, {0}},
598 {{0}, {0}, {0}, {0}}},
599
600 {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
601 {1, 1, 1}, 4, 4,
602 32, {{0}, {0}, {32}, {0}},
603 {{0}, {0}, {0}, {0}}},
604
605 {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
606 {1, 1, 1}, 4, 4,
607 32, {{0}, {0}, {32}, {0}},
608 {{0}, {0}, {0}, {0}}},
609
610 {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
611 {1, 1, 1}, 4, 4,
612 32, {{0}, {8}, {24}, {0}},
613 {{0}, {24}, {0}, {0}}},
614
615 {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
616 {1, 1, 1}, 4, 4,
617 32, {{0}, {8}, {24}, {0}},
618 {{0}, {24}, {0}, {0}}},
619
620 {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
621 {1, 1, 1}, 4, 4,
622 32, {{0}, {0}, {24}, {0}},
623 {{0}, {0}, {0}, {0}}},
624
625 {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
626 {1, 1, 1}, 4, 4,
627 32, {{0}, {8}, {0}, {0}},
628 {{0}, {24}, {0}, {0}}},
629
630 {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
631 {1, 1, 1}, 2, 2,
632 16, {{0}, {8}, {8}, {0}},
633 {{0}, {8}, {0}, {0}}},
634
635 {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
636 {1, 1, 1}, 2, 2,
637 16, {{0}, {8}, {8}, {0}},
638 {{0}, {8}, {0}, {0}}},
639
640 {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
641 {1, 1, 1}, 2, 2,
642 16, {{0}, {8}, {8}, {0}},
643 {{0}, {8}, {0}, {0}}},
644
645 {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
646 {1, 1, 1}, 2, 2,
647 16, {{0}, {8}, {8}, {0}},
648 {{0}, {8}, {0}, {0}}},
649
650 {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
651 {1, 1, 1}, 2, 2,
652 16, {{0}, {0}, {16}, {0}},
653 {{0}, {0}, {0}, {0}}},
654
655 {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
656 {1, 1, 1}, 2, 2,
657 16, {{0}, {0}, {16}, {0}},
658 {{0}, {0}, {0}, {0}}},
659
660 {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
661 {1, 1, 1}, 2, 2,
662 16, {{0}, {0}, {16}, {0}},
663 {{0}, {0}, {0}, {0}}},
664
665 {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
666 {1, 1, 1}, 2, 2,
667 16, {{0}, {0}, {16}, {0}},
668 {{0}, {0}, {0}, {0}}},
669
670 {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
671 {1, 1, 1}, 2, 2,
672 16, {{0}, {0}, {16}, {0}},
673 {{0}, {0}, {0}, {0}}},
674
675 {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
676 {1, 1, 1}, 1, 1,
677 8, {{0}, {0}, {8}, {0}},
678 {{0}, {0}, {0}, {0}}},
679
680 {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
681 {1, 1, 1}, 1, 1,
682 8, {{0}, {0}, {8}, {0}},
683 {{0}, {0}, {0}, {0}}},
684
685 {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
686 {1, 1, 1}, 1, 1,
687 8, {{0}, {0}, {8}, {0}},
688 {{0}, {0}, {0}, {0}}},
689
690 {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
691 {1, 1, 1}, 1, 1,
692 8, {{0}, {0}, {8}, {0}},
693 {{0}, {0}, {0}, {0}}},
694
695 {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
696 {1, 1, 1}, 1, 1,
697 8, {{0}, {0}, {8}, {0}},
698 {{0}, {0}, {0}, {0}}},
699
700 {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
701 {1, 1, 1}, 1, 1,
702 8, {{0}, {0}, {8}, {0}},
703 {{0}, {0}, {0}, {0}}},
704
705 {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
706 {1, 1, 1}, 4, 4,
707 32, {{9}, {9}, {9}, {5}},
708 {{18}, {9}, {0}, {27}}},
709
710 {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
711 {1, 1, 1}, 2, 2,
712 16, {{0}, {8}, {8}, {0}},
713 {{0}, {8}, {0}, {0}}},
714
715 {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
716 {1, 1, 1}, 2, 2,
717 16, {{0}, {8}, {8}, {0}},
718 {{0}, {8}, {0}, {0}}},
719
720 {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
721 {4, 4, 1}, 8, 8,
722 64, {{0}, {0}, {64}, {0}},
723 {{0}, {0}, {0}, {0}}},
724
725 {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
726 {4, 4, 1}, 8, 8,
727 64, {{0}, {0}, {64}, {0}},
728 {{0}, {0}, {0}, {0}}},
729
730 {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
731 {4, 4, 1}, 16, 16,
732 128, {{0}, {0}, {128}, {0}},
733 {{0}, {0}, {0}, {0}}},
734
735 {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
736 {4, 4, 1}, 16, 16,
737 128, {{0}, {0}, {128}, {0}},
738 {{0}, {0}, {0}, {0}}},
739
740 {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
741 {4, 4, 1}, 16, 16,
742 128, {{0}, {0}, {128}, {0}},
743 {{0}, {0}, {0}, {0}}},
744
745 {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
746 {4, 4, 1}, 16, 16,
747 128, {{0}, {0}, {128}, {0}},
748 {{0}, {0}, {0}, {0}}},
749
750 {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
751 {4, 4, 1}, 8, 8,
752 64, {{0}, {0}, {64}, {0}},
753 {{0}, {0}, {0}, {0}}},
754
755 {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
756 {4, 4, 1}, 8, 8,
757 64, {{0}, {0}, {64}, {0}},
758 {{0}, {0}, {0}, {0}}},
759
760 {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
761 {4, 4, 1}, 8, 8,
762 64, {{0}, {0}, {64}, {0}},
763 {{0}, {0}, {0}, {0}}},
764
765 {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
766 {4, 4, 1}, 16, 16,
767 128, {{0}, {0}, {128}, {0}},
768 {{0}, {0}, {0}, {0}}},
769
770 {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
771 {4, 4, 1}, 16, 16,
772 128, {{0}, {0}, {128}, {0}},
773 {{0}, {0}, {0}, {0}}},
774
775 {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
776 {4, 4, 1}, 16, 16,
777 128, {{0}, {0}, {128}, {0}},
778 {{0}, {0}, {0}, {0}}},
779
780 {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
781 {1, 1, 1}, 4, 4,
782 32, {{10}, {10}, {10}, {2}},
783 {{0}, {10}, {20}, {30}}},
784
785 {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
786 {1, 1, 1}, 4, 4,
787 32, {{8}, {8}, {8}, {8}},
788 {{0}, {8}, {16}, {24}}},
789
790 {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
791 {1, 1, 1}, 4, 4,
792 32, {{8}, {8}, {8}, {8}},
793 {{0}, {8}, {16}, {24}}},
794
795 {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
796 {1, 1, 1}, 4, 4,
797 24, {{8}, {8}, {8}, {0}},
798 {{0}, {8}, {16}, {24}}},
799
800 {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
801 {1, 1, 1}, 4, 4,
802 24, {{8}, {8}, {8}, {0}},
803 {{0}, {8}, {16}, {24}}},
804
805 {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
806 {1, 1, 1}, 2, 2,
807 16, {{0}, {0}, {16}, {0}},
808 {{0}, {0}, {0}, {0}}},
809
810 {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
811 {1, 1, 1}, 4, 4,
812 32, {{0}, {8}, {24}, {0}},
813 {{0}, {24}, {0}, {0}}},
814
815 {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
816 {1, 1, 1}, 4, 4,
817 32, {{0}, {8}, {24}, {0}},
818 {{0}, {24}, {0}, {0}}},
819
820 {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
821 {2, 2, 1}, 6, 2,
822 48, {{0}, {0}, {48}, {0}},
823 {{0}, {0}, {0}, {0}}},
824
825 {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
826 {1, 1, 1}, 16, 16,
827 128, {{32}, {32}, {32}, {32}},
828 {{64}, {32}, {0}, {96}}},
829
830 {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
831 {1, 1, 1}, 8, 8,
832 64, {{16}, {16}, {16}, {16}},
833 {{32}, {16}, {0}, {48}}},
834
835 {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
836 {1, 1, 1}, 8, 8,
837 64, {{16}, {16}, {16}, {16}},
838 {{32}, {16}, {0}, {48}}},
839
840 {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
841 {1, 1, 1}, 8, 8,
842 64, {{0}, {32}, {32}, {0}},
843 {{0}, {32}, {0}, {0}}},
844
845 {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
846 {1, 1, 1}, 4, 4,
847 32, {{10}, {10}, {10}, {2}},
848 {{0}, {10}, {20}, {30}}},
849
850 {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
851 {1, 1, 1}, 4, 4,
852 32, {{8}, {8}, {8}, {8}},
853 {{24}, {16}, {8}, {0}}},
854
855 {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
856 {1, 1, 1}, 4, 4,
857 32, {{0}, {16}, {16}, {0}},
858 {{0}, {16}, {0}, {0}}},
859
860 {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
861 {1, 1, 1}, 4, 4,
862 32, {{0}, {16}, {16}, {0}},
863 {{0}, {0}, {16}, {0}}},
864
865 {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
866 {1, 1, 1}, 4, 4,
867 32, {{16}, {16}, {0}, {0}},
868 {{16}, {0}, {0}, {0}}},
869
870 {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
871 {1, 1, 1}, 4, 4,
872 32, {{0}, {0}, {32}, {0}},
873 {{0}, {0}, {0}, {0}}},
874
875 {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
876 {1, 1, 1}, 2, 2,
877 16, {{8}, {8}, {0}, {0}},
878 {{8}, {0}, {0}, {0}}},
879
880 {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
881 {1, 1, 1}, 2, 2,
882 16, {{0}, {0}, {16}, {0}},
883 {{0}, {0}, {0}, {0}}},
884
885 {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
886 {1, 1, 1}, 2, 2,
887 16, {{0}, {0}, {16}, {0}},
888 {{0}, {0}, {0}, {0}}},
889
890 {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
891 {1, 1, 1}, 1, 1,
892 8, {{0}, {0}, {0}, {8}},
893 {{0}, {0}, {0}, {0}}},
894
895 {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
896 {4, 4, 1}, 8, 8,
897 64, {{0}, {0}, {64}, {0}},
898 {{0}, {0}, {0}, {0}}},
899
900 {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
901 {4, 4, 1}, 16, 16,
902 128, {{0}, {0}, {128}, {0}},
903 {{0}, {0}, {0}, {0}}},
904
905 {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
906 {4, 4, 1}, 16, 16,
907 128, {{0}, {0}, {128}, {0}},
908 {{0}, {0}, {0}, {0}}},
909
910 {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
911 {1, 1, 1}, 2, 2,
912 16, {{5}, {6}, {5}, {0}},
913 {{0}, {5}, {11}, {0}}},
914
915 {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
916 {1, 1, 1}, 2, 2,
917 16, {{5}, {5}, {5}, {1}},
918 {{0}, {5}, {10}, {15}}},
919
920 {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
921 {1, 1, 1}, 4, 4,
922 32, {{8}, {8}, {8}, {8}},
923 {{0}, {8}, {16}, {24}}},
924
925 {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
926 {1, 1, 1}, 4, 4,
927 24, {{8}, {8}, {8}, {0}},
928 {{0}, {8}, {16}, {24}}},
929
930 {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
931 {4, 4, 1}, 8, 8,
932 64, {{0}, {0}, {64}, {0}},
933 {{0}, {0}, {0}, {0}}},
934
935 {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
936 {4, 4, 1}, 16, 16,
937 128, {{0}, {0}, {128}, {0}},
938 {{0}, {0}, {0}, {0}}},
939
940};
941
942static inline u32 clamped_umul32(u32 a, u32 b)
943{
944 uint64_t tmp = (uint64_t) a*b;
945 return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
946}
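
The saturating behaviour is what keeps the size computations below overflow-safe; a quick sketch:

	static inline u32
	example_saturating_mul(void)
	{
		/* 0x10000 * 0x10000 == 2^32 overflows u32, so the result
		 * clamps to 0xffffffff instead of wrapping to 0. */
		return clamped_umul32(0x10000u, 0x10000u);
	}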
947
948static inline const struct svga3d_surface_desc *
949svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
950{
951 if (format < ARRAY_SIZE(svga3d_surface_descs))
952 return &svga3d_surface_descs[format];
953
954 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
955}
956
957/*
958 *----------------------------------------------------------------------
959 *
960 * svga3dsurface_get_mip_size --
961 *
962 * Given a base level size and the mip level, compute the size of
963 * the mip level.
964 *
965 * Results:
966 * See above.
967 *
968 * Side effects:
969 * None.
970 *
971 *----------------------------------------------------------------------
972 */
973
974static inline surf_size_struct
975svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
976{
977 surf_size_struct size;
978
979 size.width = max_t(u32, base_level.width >> mip_level, 1);
980 size.height = max_t(u32, base_level.height >> mip_level, 1);
981 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
982 return size;
983}
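
A worked instance of the clamping above (a sketch): mip level 2 of a 256x256x1 base level comes out as 64x64x1, and any dimension that would shift to zero is held at 1.

	static inline surf_size_struct
	example_mip2_of_256x256(void)
	{
		surf_size_struct base = { .width = 256, .height = 256, .depth = 1 };

		return svga3dsurface_get_mip_size(base, 2);	/* {64, 64, 1} */
	}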
984
985static inline void
986svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
987 const surf_size_struct *pixel_size,
988 surf_size_struct *block_size)
989{
990 block_size->width = DIV_ROUND_UP(pixel_size->width,
991 desc->block_size.width);
992 block_size->height = DIV_ROUND_UP(pixel_size->height,
993 desc->block_size.height);
994 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
995 desc->block_size.depth);
996}
997
998static inline bool
999svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
1000{
1001 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
1002}
1003
1004static inline u32
1005svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
1006 const surf_size_struct *size)
1007{
1008 u32 pitch;
1009 surf_size_struct blocks;
1010
1011 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
1012
1013 pitch = blocks.width * desc->pitch_bytes_per_block;
1014
1015 return pitch;
1016}
1017
1018/*
1019 *-----------------------------------------------------------------------------
1020 *
1021 * svga3dsurface_get_image_buffer_size --
1022 *
1023 * Return the number of bytes of buffer space required to store
1024 * one image of a surface, optionally using the specified pitch.
1025 *
1026 * If pitch is zero, it is assumed that rows are tightly packed.
1027 *
1028 * This function is overflow-safe. If the result would have
1029 * overflowed, instead we return MAX_UINT32.
1030 *
1031 * Results:
1032 * Byte count.
1033 *
1034 * Side effects:
1035 * None.
1036 *
1037 *-----------------------------------------------------------------------------
1038 */
1039
1040static inline u32
1041svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
1042 const surf_size_struct *size,
1043 u32 pitch)
1044{
1045 surf_size_struct image_blocks;
1046 u32 slice_size, total_size;
1047
1048 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
1049
1050 if (svga3dsurface_is_planar_surface(desc)) {
1051 total_size = clamped_umul32(image_blocks.width,
1052 image_blocks.height);
1053 total_size = clamped_umul32(total_size, image_blocks.depth);
1054 total_size = clamped_umul32(total_size, desc->bytes_per_block);
1055 return total_size;
1056 }
1057
1058 if (pitch == 0)
1059 pitch = svga3dsurface_calculate_pitch(desc, size);
1060
1061 slice_size = clamped_umul32(image_blocks.height, pitch);
1062 total_size = clamped_umul32(slice_size, image_blocks.depth);
1063
1064 return total_size;
1065}
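
A worked example of the tightly packed path (a sketch): one 64x64 SVGA3D_A8R8G8B8 image has a pitch of 64 blocks * 4 bytes = 256, so the image takes 64 rows * 256 = 16384 bytes.

	static inline u32
	example_argb8888_image_bytes(void)
	{
		const struct svga3d_surface_desc *desc =
			svga3dsurface_get_desc(SVGA3D_A8R8G8B8);
		surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

		return svga3dsurface_get_image_buffer_size(desc, &size, 0);	/* 16384 */
	}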
1066
1067static inline u32
1068svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
1069 surf_size_struct base_level_size,
1070 u32 num_mip_levels,
1071 u32 num_layers)
1072{
1073 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
1074 u32 total_size = 0;
1075 u32 mip;
1076
1077 for (mip = 0; mip < num_mip_levels; mip++) {
1078 surf_size_struct size =
1079 svga3dsurface_get_mip_size(base_level_size, mip);
1080 total_size += svga3dsurface_get_image_buffer_size(desc,
1081 &size, 0);
1082 }
1083
1084 return total_size * num_layers;
1085}
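
Putting the per-mip sizing to use (a sketch): the backing-store bytes for a 256x256 SVGA3D_A8R8G8B8 surface with a full 9-level mip chain and a single layer is one call.

	static inline u32
	example_full_mip_chain_bytes(void)
	{
		surf_size_struct base = { .width = 256, .height = 256, .depth = 1 };

		return svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8, base, 9, 1);
	}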
1086
1087
1088/**
1089 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
1090 * in an image (or volume).
1091 *
1092 * @width: The image width in pixels.
1093 * @height: The image height in pixels.
1094 */
1095static inline u32
1096svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
1097 u32 width, u32 height,
1098 u32 x, u32 y, u32 z)
1099{
1100 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
1101 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
1102 const u32 bd = desc->block_size.depth;
1103 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
1104 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
1105 const u32 offset = (z / bd * imgstride +
1106 y / bh * rowstride +
1107 x / bw * desc->bytes_per_block);
1108 return offset;
1109}
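
A worked instance (a sketch): in a 4x4 SVGA3D_A8R8G8B8 image the rowstride is 4 * 4 = 16 bytes, so pixel (x=2, y=1, z=0) sits at 1*16 + 2*4 = 24 bytes.

	static inline u32
	example_pixel_offset_24(void)
	{
		return svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8, 4, 4, 2, 1, 0);
	}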
1110
1111
1112static inline u32
1113svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
1114 surf_size_struct baseLevelSize,
1115 u32 numMipLevels,
1116 u32 face,
1117 u32 mip)
1118
1119{
1120 u32 offset;
1121 u32 mipChainBytes;
1122 u32 mipChainBytesToLevel;
1123 u32 i;
1124 const struct svga3d_surface_desc *desc;
1125 surf_size_struct mipSize;
1126 u32 bytes;
1127
1128 desc = svga3dsurface_get_desc(format);
1129
1130 mipChainBytes = 0;
1131 mipChainBytesToLevel = 0;
1132 for (i = 0; i < numMipLevels; i++) {
1133 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
1134 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
1135 mipChainBytes += bytes;
1136 if (i < mip)
1137 mipChainBytesToLevel += bytes;
1138 }
1139
1140 offset = mipChainBytes * face + mipChainBytesToLevel;
1141
1142 return offset;
1143}
1144
1145
1146/**
1147 * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
1148 * a ScreenTarget?
1149 * (with just the GBObjects cap-bit
1150 * set)
1151 * @format: format to be queried
1152 *
1153 * RETURNS:
1154 * true if the queried format is valid for screen targets
1155 */
1156static inline bool
1157svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
1158{
1159 return (format == SVGA3D_X8R8G8B8 ||
1160 format == SVGA3D_A8R8G8B8 ||
1161 format == SVGA3D_R5G6B5 ||
1162 format == SVGA3D_X1R5G5B5 ||
1163 format == SVGA3D_A1R5G5B5 ||
1164 format == SVGA3D_P8);
1165}
1166
1167
1168/**
1169 * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
1170 * a ScreenTarget?
1171 * (with DX10 enabled)
1172 *
1173 * @format: format to be queried
1174 *
1175 * Results:
1176 * true if the queried format is valid for screen targets
1177 */
1178static inline bool
1179svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
1180{
1181 return (format == SVGA3D_R8G8B8A8_UNORM ||
1182 format == SVGA3D_B8G8R8A8_UNORM ||
1183 format == SVGA3D_B8G8R8X8_UNORM);
1184}
1185
1186
1187/**
1188 * svga3dsurface_is_screen_target_format - Is the specified format usable as a
1189 * ScreenTarget?
1190 * (for some combination of caps)
1191 *
1192 * @format: format to be queried
1193 *
1194 * Results:
1195 * true if the queried format is valid for screen targets
1196 */
1197static inline bool
1198svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
1199{
1200 if (svga3dsurface_is_gb_screen_target_format(format)) {
1201 return true;
1202 }
1203 return svga3dsurface_is_dx_screen_target_format(format);
1204}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
1/**********************************************************
2 * Copyright 2012-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_types.h --
28 *
29 * SVGA 3d hardware definitions for basic types
30 */
31
32#ifndef _SVGA3D_TYPES_H_
33#define _SVGA3D_TYPES_H_
34
35#define INCLUDE_ALLOW_MODULE
36#define INCLUDE_ALLOW_USERLEVEL
37#define INCLUDE_ALLOW_VMCORE
38
39#include "includeCheck.h"
40
41/*
42 * Generic Types
43 */
44
45#define SVGA3D_INVALID_ID ((uint32)-1)
46
47typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
48typedef uint32 SVGA3dColor; /* a, r, g, b */
49
50typedef
51#include "vmware_pack_begin.h"
52struct SVGA3dCopyRect {
53 uint32 x;
54 uint32 y;
55 uint32 w;
56 uint32 h;
57 uint32 srcx;
58 uint32 srcy;
59}
60#include "vmware_pack_end.h"
61SVGA3dCopyRect;
62
63typedef
64#include "vmware_pack_begin.h"
65struct SVGA3dCopyBox {
66 uint32 x;
67 uint32 y;
68 uint32 z;
69 uint32 w;
70 uint32 h;
71 uint32 d;
72 uint32 srcx;
73 uint32 srcy;
74 uint32 srcz;
75}
76#include "vmware_pack_end.h"
77SVGA3dCopyBox;
78
79typedef
80#include "vmware_pack_begin.h"
81struct SVGA3dRect {
82 uint32 x;
83 uint32 y;
84 uint32 w;
85 uint32 h;
86}
87#include "vmware_pack_end.h"
88SVGA3dRect;
89
90typedef
91#include "vmware_pack_begin.h"
92struct {
93 uint32 x;
94 uint32 y;
95 uint32 z;
96 uint32 w;
97 uint32 h;
98 uint32 d;
99}
100#include "vmware_pack_end.h"
101SVGA3dBox;
102
103typedef
104#include "vmware_pack_begin.h"
105struct {
106 uint32 x;
107 uint32 y;
108 uint32 z;
109}
110#include "vmware_pack_end.h"
111SVGA3dPoint;
112
113/*
114 * Surface formats.
115 */
116typedef enum SVGA3dSurfaceFormat {
117 SVGA3D_FORMAT_INVALID = 0,
118
119 SVGA3D_X8R8G8B8 = 1,
120 SVGA3D_FORMAT_MIN = 1,
121
122 SVGA3D_A8R8G8B8 = 2,
123
124 SVGA3D_R5G6B5 = 3,
125 SVGA3D_X1R5G5B5 = 4,
126 SVGA3D_A1R5G5B5 = 5,
127 SVGA3D_A4R4G4B4 = 6,
128
129 SVGA3D_Z_D32 = 7,
130 SVGA3D_Z_D16 = 8,
131 SVGA3D_Z_D24S8 = 9,
132 SVGA3D_Z_D15S1 = 10,
133
134 SVGA3D_LUMINANCE8 = 11,
135 SVGA3D_LUMINANCE4_ALPHA4 = 12,
136 SVGA3D_LUMINANCE16 = 13,
137 SVGA3D_LUMINANCE8_ALPHA8 = 14,
138
139 SVGA3D_DXT1 = 15,
140 SVGA3D_DXT2 = 16,
141 SVGA3D_DXT3 = 17,
142 SVGA3D_DXT4 = 18,
143 SVGA3D_DXT5 = 19,
144
145 SVGA3D_BUMPU8V8 = 20,
146 SVGA3D_BUMPL6V5U5 = 21,
147 SVGA3D_BUMPX8L8V8U8 = 22,
148 SVGA3D_BUMPL8V8U8 = 23,
149
150 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
151 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
152
153 SVGA3D_A2R10G10B10 = 26,
154
155 /* signed formats */
156 SVGA3D_V8U8 = 27,
157 SVGA3D_Q8W8V8U8 = 28,
158 SVGA3D_CxV8U8 = 29,
159
160 /* mixed formats */
161 SVGA3D_X8L8V8U8 = 30,
162 SVGA3D_A2W10V10U10 = 31,
163
164 SVGA3D_ALPHA8 = 32,
165
166 /* Single- and dual-component floating point formats */
167 SVGA3D_R_S10E5 = 33,
168 SVGA3D_R_S23E8 = 34,
169 SVGA3D_RG_S10E5 = 35,
170 SVGA3D_RG_S23E8 = 36,
171
172 SVGA3D_BUFFER = 37,
173
174 SVGA3D_Z_D24X8 = 38,
175
176 SVGA3D_V16U16 = 39,
177
178 SVGA3D_G16R16 = 40,
179 SVGA3D_A16B16G16R16 = 41,
180
181 /* Packed Video formats */
182 SVGA3D_UYVY = 42,
183 SVGA3D_YUY2 = 43,
184
185 /* Planar video formats */
186 SVGA3D_NV12 = 44,
187
188 /* Video format with alpha */
189 SVGA3D_AYUV = 45,
190
191 SVGA3D_R32G32B32A32_TYPELESS = 46,
192 SVGA3D_R32G32B32A32_UINT = 47,
193 SVGA3D_R32G32B32A32_SINT = 48,
194 SVGA3D_R32G32B32_TYPELESS = 49,
195 SVGA3D_R32G32B32_FLOAT = 50,
196 SVGA3D_R32G32B32_UINT = 51,
197 SVGA3D_R32G32B32_SINT = 52,
198 SVGA3D_R16G16B16A16_TYPELESS = 53,
199 SVGA3D_R16G16B16A16_UINT = 54,
200 SVGA3D_R16G16B16A16_SNORM = 55,
201 SVGA3D_R16G16B16A16_SINT = 56,
202 SVGA3D_R32G32_TYPELESS = 57,
203 SVGA3D_R32G32_UINT = 58,
204 SVGA3D_R32G32_SINT = 59,
205 SVGA3D_R32G8X24_TYPELESS = 60,
206 SVGA3D_D32_FLOAT_S8X24_UINT = 61,
207 SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
208 SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
209 SVGA3D_R10G10B10A2_TYPELESS = 64,
210 SVGA3D_R10G10B10A2_UINT = 65,
211 SVGA3D_R11G11B10_FLOAT = 66,
212 SVGA3D_R8G8B8A8_TYPELESS = 67,
213 SVGA3D_R8G8B8A8_UNORM = 68,
214 SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
215 SVGA3D_R8G8B8A8_UINT = 70,
216 SVGA3D_R8G8B8A8_SINT = 71,
217 SVGA3D_R16G16_TYPELESS = 72,
218 SVGA3D_R16G16_UINT = 73,
219 SVGA3D_R16G16_SINT = 74,
220 SVGA3D_R32_TYPELESS = 75,
221 SVGA3D_D32_FLOAT = 76,
222 SVGA3D_R32_UINT = 77,
223 SVGA3D_R32_SINT = 78,
224 SVGA3D_R24G8_TYPELESS = 79,
225 SVGA3D_D24_UNORM_S8_UINT = 80,
226 SVGA3D_R24_UNORM_X8_TYPELESS = 81,
227 SVGA3D_X24_TYPELESS_G8_UINT = 82,
228 SVGA3D_R8G8_TYPELESS = 83,
229 SVGA3D_R8G8_UNORM = 84,
230 SVGA3D_R8G8_UINT = 85,
231 SVGA3D_R8G8_SINT = 86,
232 SVGA3D_R16_TYPELESS = 87,
233 SVGA3D_R16_UNORM = 88,
234 SVGA3D_R16_UINT = 89,
235 SVGA3D_R16_SNORM = 90,
236 SVGA3D_R16_SINT = 91,
237 SVGA3D_R8_TYPELESS = 92,
238 SVGA3D_R8_UNORM = 93,
239 SVGA3D_R8_UINT = 94,
240 SVGA3D_R8_SNORM = 95,
241 SVGA3D_R8_SINT = 96,
242 SVGA3D_P8 = 97,
243 SVGA3D_R9G9B9E5_SHAREDEXP = 98,
244 SVGA3D_R8G8_B8G8_UNORM = 99,
245 SVGA3D_G8R8_G8B8_UNORM = 100,
246 SVGA3D_BC1_TYPELESS = 101,
247 SVGA3D_BC1_UNORM_SRGB = 102,
248 SVGA3D_BC2_TYPELESS = 103,
249 SVGA3D_BC2_UNORM_SRGB = 104,
250 SVGA3D_BC3_TYPELESS = 105,
251 SVGA3D_BC3_UNORM_SRGB = 106,
252 SVGA3D_BC4_TYPELESS = 107,
253 SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */
254 SVGA3D_BC4_SNORM = 109,
255 SVGA3D_BC5_TYPELESS = 110,
256 SVGA3D_ATI2 = 111, /* DX9-specific BC5_UNORM */
257 SVGA3D_BC5_SNORM = 112,
258 SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
259 SVGA3D_B8G8R8A8_TYPELESS = 114,
260 SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
261 SVGA3D_B8G8R8X8_TYPELESS = 116,
262 SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
263
264 /* Advanced depth formats. */
265 SVGA3D_Z_DF16 = 118,
266 SVGA3D_Z_DF24 = 119,
267 SVGA3D_Z_D24S8_INT = 120,
268
269 /* Planar video formats. */
270 SVGA3D_YV12 = 121,
271
272 SVGA3D_R32G32B32A32_FLOAT = 122,
273 SVGA3D_R16G16B16A16_FLOAT = 123,
274 SVGA3D_R16G16B16A16_UNORM = 124,
275 SVGA3D_R32G32_FLOAT = 125,
276 SVGA3D_R10G10B10A2_UNORM = 126,
277 SVGA3D_R8G8B8A8_SNORM = 127,
278 SVGA3D_R16G16_FLOAT = 128,
279 SVGA3D_R16G16_UNORM = 129,
280 SVGA3D_R16G16_SNORM = 130,
281 SVGA3D_R32_FLOAT = 131,
282 SVGA3D_R8G8_SNORM = 132,
283 SVGA3D_R16_FLOAT = 133,
284 SVGA3D_D16_UNORM = 134,
285 SVGA3D_A8_UNORM = 135,
286 SVGA3D_BC1_UNORM = 136,
287 SVGA3D_BC2_UNORM = 137,
288 SVGA3D_BC3_UNORM = 138,
289 SVGA3D_B5G6R5_UNORM = 139,
290 SVGA3D_B5G5R5A1_UNORM = 140,
291 SVGA3D_B8G8R8A8_UNORM = 141,
292 SVGA3D_B8G8R8X8_UNORM = 142,
293 SVGA3D_BC4_UNORM = 143,
294 SVGA3D_BC5_UNORM = 144,
295
296 SVGA3D_FORMAT_MAX
297} SVGA3dSurfaceFormat;
298
299typedef enum SVGA3dSurfaceFlags {
300 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
301
302 /*
303 * HINT flags are not enforced by the device but are useful for
304 * performance.
305 */
306 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
307 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
308 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
309 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
310 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
311 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
312 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
313 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
314 SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
315 SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
316 SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
317
318 /*
319 * Is this surface using a base-level pitch for its mob backing?
320 *
321 * This flag is not intended to be set by guest drivers, but is instead
322 * set by the device when the surface is bound to a mob with a specified
323 * pitch.
324 */
325 SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
326
327 SVGA3D_SURFACE_INACTIVE = (1 << 13),
328 SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
329 SVGA3D_SURFACE_VOLUME = (1 << 15),
330
331 /*
332 * Required to be set on a surface to bind it to a screen target.
333 */
334 SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
335
336 /*
337 * Align images in the guest-backing mob to 16 bytes.
338 */
339 SVGA3D_SURFACE_ALIGN16 = (1 << 17),
340
341 SVGA3D_SURFACE_1D = (1 << 18),
342 SVGA3D_SURFACE_ARRAY = (1 << 19),
343
344 /*
345 * Bind flags.
346 * These are enforced for any surface defined with DefineGBSurface_v2.
347 */
348 SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
349 SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
350 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
351 SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
352 SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
353 SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
354 SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
355
356 /*
357 * A note on staging flags:
358 *
359 * The STAGING flags note that the surface will not be used directly by the
360 * drawing pipeline, i.e. that it will not be bound to any bind point.
361 * Staging surfaces may be used by copy operations to move data in and out
362 * of other surfaces.
363 *
364 * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
365 * updates indirectly, i.e. the surface will not be updated directly, but
366 * will receive copies from staging surfaces; a short flag sketch follows this enum.
367 */
368 SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
369 SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
370 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
371
372 /*
373 * Setting this flag allows this surface to be used with the
374 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
375 * buffer surfaces, and no bind flags are allowed to be set on surfaces
376 * with this flag.
377 */
378 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
379
380 /*
381 * Marker for the last defined bit.
382 */
383 SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
384} SVGA3dSurfaceFlags;
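
/*
 * As referenced in the staging note above, a sketch (editorial) of two
 * typical flag combinations: a pure staging upload buffer carries a
 * staging flag and no bind flags, while a sampled texture carries bind
 * flags instead. The choices are illustrative, not mandated by the device.
 */
static const SVGA3dSurfaceFlags example_staging_flags =
	SVGA3D_SURFACE_STAGING_UPLOAD;
static const SVGA3dSurfaceFlags example_texture_flags = (SVGA3dSurfaceFlags)
	(SVGA3D_SURFACE_HINT_TEXTURE | SVGA3D_SURFACE_BIND_SHADER_RESOURCE);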
385
386#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
387 ( SVGA3D_SURFACE_MOB_PITCH | \
388 SVGA3D_SURFACE_SCREENTARGET | \
389 SVGA3D_SURFACE_ALIGN16 | \
390 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
391 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
392 SVGA3D_SURFACE_STAGING_UPLOAD | \
393 SVGA3D_SURFACE_STAGING_DOWNLOAD | \
394 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
395 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
396 )
397
398#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
399 ( SVGA3D_SURFACE_CUBEMAP | \
400 SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
401 SVGA3D_SURFACE_AUTOGENMIPMAPS | \
402 SVGA3D_SURFACE_DECODE_RENDERTARGET | \
403 SVGA3D_SURFACE_VOLUME | \
404 SVGA3D_SURFACE_1D | \
405 SVGA3D_SURFACE_ARRAY | \
406 SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
407 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
408 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
409 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
410 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
411 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
412 )
413
414#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
415 ( SVGA3D_SURFACE_CUBEMAP | \
416 SVGA3D_SURFACE_AUTOGENMIPMAPS | \
417 SVGA3D_SURFACE_DECODE_RENDERTARGET | \
418 SVGA3D_SURFACE_VOLUME | \
419 SVGA3D_SURFACE_1D | \
420 SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
421 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
422 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
423 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
424 SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
425 SVGA3D_SURFACE_INACTIVE | \
426 SVGA3D_SURFACE_STAGING_UPLOAD | \
427 SVGA3D_SURFACE_STAGING_DOWNLOAD | \
428 SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
429 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
430 )
431
432#define SVGA3D_SURFACE_DX_ONLY_MASK \
433 ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
434 SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
435 )
436#define SVGA3D_SURFACE_STAGING_MASK \
437 ( SVGA3D_SURFACE_STAGING_UPLOAD | \
438 SVGA3D_SURFACE_STAGING_DOWNLOAD \
439 )
440
441#define SVGA3D_SURFACE_BIND_MASK \
442 ( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
443 SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
444 SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
445 SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
446 SVGA3D_SURFACE_BIND_RENDER_TARGET | \
447 SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
448 SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
449 )
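
/*
 * A sketch (editorial) of how the masks above compose into a single
 * validity check; vmw_surface_flags_valid() is a hypothetical name.
 */
static inline bool vmw_surface_flags_valid(uint32 flags, bool has_dx_context)
{
	/* Staging surfaces may not be bound to the drawing pipeline. */
	if ((flags & SVGA3D_SURFACE_STAGING_MASK) &&
	    (flags & SVGA3D_SURFACE_BIND_MASK))
		return false;

	/* Some flags are only legal when a DX context is available. */
	if (!has_dx_context && (flags & SVGA3D_SURFACE_DX_ONLY_MASK))
		return false;

	return true;
}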
450
451typedef enum {
452 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
453 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
454 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
455 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
456 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
457 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
458 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
459
460/*
461 * This format can be used as a render target if the current display mode
462 * has the same depth when the alpha channel is ignored, e.g. if the device
463 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
464 * format op list entry for A8R8G8B8 should have this cap.
465 */
466 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
467
468/*
469 * This format contains DirectDraw support (including Flip). This flag
470 * should not be set on alpha formats.
471 */
472 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
473
474/*
475 * The rasterizer can provide some level of Direct3D support for this format,
476 * implying that the driver can create a context in this mode (for some
477 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
478 * flag must also be set.
479 */
480 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
481
482/*
483 * This is set for a private format when the driver has put the bpp in
484 * the structure.
485 */
486 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
487
488/*
489 * Indicates that this format can be converted to any RGB format for which
490 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
491 */
492 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
493
494/*
495 * Indicates that this format can be used to create offscreen plain surfaces.
496 */
497 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
498
499/*
500 * Indicates that this format can be read as an SRGB texture (meaning that the
501 * sampler will linearize the looked up data)
502 */
503 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
504
505/*
506 * Indicates that this format can be used in the bumpmap instructions
507 */
508 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
509
510/*
511 * Indicates that this format can be sampled by the displacement map sampler
512 */
513 SVGA3DFORMAT_OP_DMAP = 0x00020000,
514
515/*
516 * Indicates that this format cannot be used with texture filtering
517 */
518 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
519
520/*
521 * Indicates that format conversions are supported to this RGB format if
522 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
523 */
524 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
525
526/*
527 * Indicates that this format can be written as an SRGB target
528 * (meaning that the pixel pipe will DE-linearize data on output to format)
529 */
530 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
531
532/*
533 * Indicates that this format cannot be used with alpha blending
534 */
535 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
536
537/*
538 * Indicates that the device can auto-generate sublevels for resources
539 * of this format
540 */
541 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
542
543/*
544 * Indicates that this format can be used by vertex texture sampler
545 */
546 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
547
548/*
549 * Indicates that this format supports neither texture coordinate
550 * wrap modes nor mipmapping.
551 */
552 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
553} SVGA3dFormatOp;
554
555#define SVGA3D_FORMAT_POSITIVE \
556 (SVGA3DFORMAT_OP_TEXTURE | \
557 SVGA3DFORMAT_OP_VOLUMETEXTURE | \
558 SVGA3DFORMAT_OP_CUBETEXTURE | \
559 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \
560 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \
561 SVGA3DFORMAT_OP_ZSTENCIL | \
562 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \
563 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
564 SVGA3DFORMAT_OP_DISPLAYMODE | \
565 SVGA3DFORMAT_OP_3DACCELERATION | \
566 SVGA3DFORMAT_OP_PIXELSIZE | \
567 SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \
568 SVGA3DFORMAT_OP_OFFSCREENPLAIN | \
569 SVGA3DFORMAT_OP_SRGBREAD | \
570 SVGA3DFORMAT_OP_BUMPMAP | \
571 SVGA3DFORMAT_OP_DMAP | \
572 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \
573 SVGA3DFORMAT_OP_SRGBWRITE | \
574 SVGA3DFORMAT_OP_AUTOGENMIPMAP | \
575 SVGA3DFORMAT_OP_VERTEXTEXTURE)
576
577#define SVGA3D_FORMAT_NEGATIVE \
578 (SVGA3DFORMAT_OP_NOFILTER | \
579 SVGA3DFORMAT_OP_NOALPHABLEND | \
580 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
581
582/*
583 * This structure is a conversion of SVGA3DFORMAT_OP_*
584 * Entries must be located at the same bit positions.
585 */
586typedef union {
587 uint32 value;
588 struct {
589 uint32 texture : 1;
590 uint32 volumeTexture : 1;
591 uint32 cubeTexture : 1;
592 uint32 offscreenRenderTarget : 1;
593 uint32 sameFormatRenderTarget : 1;
594 uint32 unknown1 : 1;
595 uint32 zStencil : 1;
596 uint32 zStencilArbitraryDepth : 1;
597 uint32 sameFormatUpToAlpha : 1;
598 uint32 unknown2 : 1;
599 uint32 displayMode : 1;
600 uint32 acceleration3d : 1;
601 uint32 pixelSize : 1;
602 uint32 convertToARGB : 1;
603 uint32 offscreenPlain : 1;
604 uint32 sRGBRead : 1;
605 uint32 bumpMap : 1;
606 uint32 dmap : 1;
607 uint32 noFilter : 1;
608 uint32 memberOfGroupARGB : 1;
609 uint32 sRGBWrite : 1;
610 uint32 noAlphaBlend : 1;
611 uint32 autoGenMipMap : 1;
612 uint32 vertexTexture : 1;
613 uint32 noTexCoordWrapNorMip : 1;
614 };
615} SVGA3dSurfaceFormatCaps;
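
/*
 * Since the bit-fields above line up one-to-one with the
 * SVGA3DFORMAT_OP_* bits, a raw cap dword can be viewed through the
 * union instead of masking by hand; a short sketch (editorial):
 */
static inline bool format_caps_allow_cube_texture(uint32 format_ops)
{
	SVGA3dSurfaceFormatCaps caps;

	caps.value = format_ops;
	return caps.cubeTexture != 0; /* same bit as SVGA3DFORMAT_OP_CUBETEXTURE */
}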
616
617/*
618 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
619 * must fit in a uint32.
620 */
621
622typedef enum {
623 SVGA3D_RS_INVALID = 0,
624 SVGA3D_RS_MIN = 1,
625 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
626 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
627 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
628 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
629 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
630 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
631 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
632 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
633 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
634 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
635 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
636 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
637 SVGA3D_RS_STENCILREF = 13, /* uint32 */
638 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
639 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
640 SVGA3D_RS_FOGSTART = 16, /* float */
641 SVGA3D_RS_FOGEND = 17, /* float */
642 SVGA3D_RS_FOGDENSITY = 18, /* float */
643 SVGA3D_RS_POINTSIZE = 19, /* float */
644 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
645 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
646 SVGA3D_RS_POINTSCALE_A = 22, /* float */
647 SVGA3D_RS_POINTSCALE_B = 23, /* float */
648 SVGA3D_RS_POINTSCALE_C = 24, /* float */
649 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
650 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
651 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
652 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
653 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
654 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
655 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
656 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
657 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
658 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
659 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
660 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
661 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
662 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
663 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
664 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
665 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
666 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
667 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
668 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
669 SVGA3D_RS_ZBIAS = 45, /* float */
670 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
671 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
672 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
673 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
674 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
675 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
676 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
677 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
678 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
679 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
680 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
681 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
682 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
683 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
684 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
685 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
686 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
687 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
688 SVGA3D_RS_DEPTHBIAS = 64, /* float */
689
690
691 /*
692 * Output Gamma Level
693 *
694 * Output gamma affects the gamma curve of colors that are output from the
695 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
696 * value is <= 0.0, gamma correction is ignored and linear color space is
697 * used.
698 */
699
700 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
701 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
702 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
703 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
704 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
705 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
706 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
707 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
708 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
709 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
710 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
711 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
712 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
713 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
714 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
715 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
716 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
717 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
718 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
719 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
720 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
721 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
722 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
723 SVGA3D_RS_TWEENFACTOR = 88, /* float */
724 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
725 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
726 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
727 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
728 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
729 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
730 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
731 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
732 SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
733 SVGA3D_RS_LINEWIDTH = 98, /* float */
734 SVGA3D_RS_MAX
735} SVGA3dRenderStateName;
736
737typedef enum {
738 SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
739 SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
740 SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
741 SVGA3D_TRANSPARENCYANTIALIAS_MAX
742} SVGA3dTransparencyAntialiasType;
743
744typedef enum {
745 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
746 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
747 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
748 SVGA3D_VERTEXMATERIAL_MAX = 3,
749} SVGA3dVertexMaterial;
750
751typedef enum {
752 SVGA3D_FILLMODE_INVALID = 0,
753 SVGA3D_FILLMODE_MIN = 1,
754 SVGA3D_FILLMODE_POINT = 1,
755 SVGA3D_FILLMODE_LINE = 2,
756 SVGA3D_FILLMODE_FILL = 3,
757 SVGA3D_FILLMODE_MAX
758} SVGA3dFillModeType;
759
760
761typedef
762#include "vmware_pack_begin.h"
763union {
764 struct {
765 uint16 mode; /* SVGA3dFillModeType */
766 uint16 face; /* SVGA3dFace */
767 };
768 uint32 uintValue;
769}
770#include "vmware_pack_end.h"
771SVGA3dFillMode;
772
773typedef enum {
774 SVGA3D_SHADEMODE_INVALID = 0,
775 SVGA3D_SHADEMODE_FLAT = 1,
776 SVGA3D_SHADEMODE_SMOOTH = 2,
777 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
778 SVGA3D_SHADEMODE_MAX
779} SVGA3dShadeMode;
780
781typedef
782#include "vmware_pack_begin.h"
783union {
784 struct {
785 uint16 repeat;
786 uint16 pattern;
787 };
788 uint32 uintValue;
789}
790#include "vmware_pack_end.h"
791SVGA3dLinePattern;
792
793typedef enum {
794 SVGA3D_BLENDOP_INVALID = 0,
795 SVGA3D_BLENDOP_MIN = 1,
796 SVGA3D_BLENDOP_ZERO = 1,
797 SVGA3D_BLENDOP_ONE = 2,
798 SVGA3D_BLENDOP_SRCCOLOR = 3,
799 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
800 SVGA3D_BLENDOP_SRCALPHA = 5,
801 SVGA3D_BLENDOP_INVSRCALPHA = 6,
802 SVGA3D_BLENDOP_DESTALPHA = 7,
803 SVGA3D_BLENDOP_INVDESTALPHA = 8,
804 SVGA3D_BLENDOP_DESTCOLOR = 9,
805 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
806 SVGA3D_BLENDOP_SRCALPHASAT = 11,
807 SVGA3D_BLENDOP_BLENDFACTOR = 12,
808 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
809 SVGA3D_BLENDOP_SRC1COLOR = 14,
810 SVGA3D_BLENDOP_INVSRC1COLOR = 15,
811 SVGA3D_BLENDOP_SRC1ALPHA = 16,
812 SVGA3D_BLENDOP_INVSRC1ALPHA = 17,
813 SVGA3D_BLENDOP_BLENDFACTORALPHA = 18,
814 SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
815 SVGA3D_BLENDOP_MAX
816} SVGA3dBlendOp;
817
818typedef enum {
819 SVGA3D_BLENDEQ_INVALID = 0,
820 SVGA3D_BLENDEQ_MIN = 1,
821 SVGA3D_BLENDEQ_ADD = 1,
822 SVGA3D_BLENDEQ_SUBTRACT = 2,
823 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
824 SVGA3D_BLENDEQ_MINIMUM = 4,
825 SVGA3D_BLENDEQ_MAXIMUM = 5,
826 SVGA3D_BLENDEQ_MAX
827} SVGA3dBlendEquation;
828
829typedef enum {
830 SVGA3D_DX11_LOGICOP_MIN = 0,
831 SVGA3D_DX11_LOGICOP_CLEAR = 0,
832 SVGA3D_DX11_LOGICOP_SET = 1,
833 SVGA3D_DX11_LOGICOP_COPY = 2,
834 SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
835 SVGA3D_DX11_LOGICOP_NOOP = 4,
836 SVGA3D_DX11_LOGICOP_INVERT = 5,
837 SVGA3D_DX11_LOGICOP_AND = 6,
838 SVGA3D_DX11_LOGICOP_NAND = 7,
839 SVGA3D_DX11_LOGICOP_OR = 8,
840 SVGA3D_DX11_LOGICOP_NOR = 9,
841 SVGA3D_DX11_LOGICOP_XOR = 10,
842 SVGA3D_DX11_LOGICOP_EQUIV = 11,
843 SVGA3D_DX11_LOGICOP_AND_REVERSE = 12,
844 SVGA3D_DX11_LOGICOP_AND_INVERTED = 13,
845 SVGA3D_DX11_LOGICOP_OR_REVERSE = 14,
846 SVGA3D_DX11_LOGICOP_OR_INVERTED = 15,
847 SVGA3D_DX11_LOGICOP_MAX
848} SVGA3dDX11LogicOp;
849
850typedef enum {
851 SVGA3D_FRONTWINDING_INVALID = 0,
852 SVGA3D_FRONTWINDING_CW = 1,
853 SVGA3D_FRONTWINDING_CCW = 2,
854 SVGA3D_FRONTWINDING_MAX
855} SVGA3dFrontWinding;
856
857typedef enum {
858 SVGA3D_FACE_INVALID = 0,
859 SVGA3D_FACE_NONE = 1,
860 SVGA3D_FACE_MIN = 1,
861 SVGA3D_FACE_FRONT = 2,
862 SVGA3D_FACE_BACK = 3,
863 SVGA3D_FACE_FRONT_BACK = 4,
864 SVGA3D_FACE_MAX
865} SVGA3dFace;
866
867/*
868 * The order and the values should not be changed
869 */
870
871typedef enum {
872 SVGA3D_CMP_INVALID = 0,
873 SVGA3D_CMP_NEVER = 1,
874 SVGA3D_CMP_LESS = 2,
875 SVGA3D_CMP_EQUAL = 3,
876 SVGA3D_CMP_LESSEQUAL = 4,
877 SVGA3D_CMP_GREATER = 5,
878 SVGA3D_CMP_NOTEQUAL = 6,
879 SVGA3D_CMP_GREATEREQUAL = 7,
880 SVGA3D_CMP_ALWAYS = 8,
881 SVGA3D_CMP_MAX
882} SVGA3dCmpFunc;
883
884/*
885 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
886 * the fog factor to be specified in the alpha component of the specular
887 * (a.k.a. secondary) vertex color.
888 */
889typedef enum {
890 SVGA3D_FOGFUNC_INVALID = 0,
891 SVGA3D_FOGFUNC_EXP = 1,
892 SVGA3D_FOGFUNC_EXP2 = 2,
893 SVGA3D_FOGFUNC_LINEAR = 3,
894 SVGA3D_FOGFUNC_PER_VERTEX = 4
895} SVGA3dFogFunction;
896
897/*
898 * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
899 * or per-pixel basis.
900 */
901typedef enum {
902 SVGA3D_FOGTYPE_INVALID = 0,
903 SVGA3D_FOGTYPE_VERTEX = 1,
904 SVGA3D_FOGTYPE_PIXEL = 2,
905 SVGA3D_FOGTYPE_MAX = 3
906} SVGA3dFogType;
907
908/*
909 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
910 * computed using the eye Z value of each pixel (or vertex), whereas range-
911 * based fog is computed using the actual distance (range) to the eye.
912 */
913typedef enum {
914 SVGA3D_FOGBASE_INVALID = 0,
915 SVGA3D_FOGBASE_DEPTHBASED = 1,
916 SVGA3D_FOGBASE_RANGEBASED = 2,
917 SVGA3D_FOGBASE_MAX = 3
918} SVGA3dFogBase;
919
920typedef enum {
921 SVGA3D_STENCILOP_INVALID = 0,
922 SVGA3D_STENCILOP_MIN = 1,
923 SVGA3D_STENCILOP_KEEP = 1,
924 SVGA3D_STENCILOP_ZERO = 2,
925 SVGA3D_STENCILOP_REPLACE = 3,
926 SVGA3D_STENCILOP_INCRSAT = 4,
927 SVGA3D_STENCILOP_DECRSAT = 5,
928 SVGA3D_STENCILOP_INVERT = 6,
929 SVGA3D_STENCILOP_INCR = 7,
930 SVGA3D_STENCILOP_DECR = 8,
931 SVGA3D_STENCILOP_MAX
932} SVGA3dStencilOp;
933
934typedef enum {
935 SVGA3D_CLIPPLANE_0 = (1 << 0),
936 SVGA3D_CLIPPLANE_1 = (1 << 1),
937 SVGA3D_CLIPPLANE_2 = (1 << 2),
938 SVGA3D_CLIPPLANE_3 = (1 << 3),
939 SVGA3D_CLIPPLANE_4 = (1 << 4),
940 SVGA3D_CLIPPLANE_5 = (1 << 5),
941} SVGA3dClipPlanes;
942
943typedef enum {
944 SVGA3D_CLEAR_COLOR = 0x1,
945 SVGA3D_CLEAR_DEPTH = 0x2,
946 SVGA3D_CLEAR_STENCIL = 0x4,
947
948 /*
949 * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
950 * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
951 * bit will be ignored.
952 */
953 SVGA3D_CLEAR_COLORFILL = 0x8
954} SVGA3dClearFlag;
955
956typedef enum {
957 SVGA3D_RT_DEPTH = 0,
958 SVGA3D_RT_MIN = 0,
959 SVGA3D_RT_STENCIL = 1,
960 SVGA3D_RT_COLOR0 = 2,
961 SVGA3D_RT_COLOR1 = 3,
962 SVGA3D_RT_COLOR2 = 4,
963 SVGA3D_RT_COLOR3 = 5,
964 SVGA3D_RT_COLOR4 = 6,
965 SVGA3D_RT_COLOR5 = 7,
966 SVGA3D_RT_COLOR6 = 8,
967 SVGA3D_RT_COLOR7 = 9,
968 SVGA3D_RT_MAX,
969 SVGA3D_RT_INVALID = ((uint32)-1),
970} SVGA3dRenderTargetType;
971
972#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
973
974typedef
975#include "vmware_pack_begin.h"
976union {
977 struct {
978 uint32 red : 1;
979 uint32 green : 1;
980 uint32 blue : 1;
981 uint32 alpha : 1;
982 };
983 uint32 uintValue;
984}
985#include "vmware_pack_end.h"
986SVGA3dColorMask;
987
988typedef enum {
989 SVGA3D_VBLEND_DISABLE = 0,
990 SVGA3D_VBLEND_1WEIGHT = 1,
991 SVGA3D_VBLEND_2WEIGHT = 2,
992 SVGA3D_VBLEND_3WEIGHT = 3,
993 SVGA3D_VBLEND_MAX = 4,
994} SVGA3dVertexBlendFlags;
995
996typedef enum {
997 SVGA3D_WRAPCOORD_0 = 1 << 0,
998 SVGA3D_WRAPCOORD_1 = 1 << 1,
999 SVGA3D_WRAPCOORD_2 = 1 << 2,
1000 SVGA3D_WRAPCOORD_3 = 1 << 3,
1001 SVGA3D_WRAPCOORD_ALL = 0xF,
1002} SVGA3dWrapFlags;
1003
1004/*
1005 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
1006 * must fit in a uint32.
1007 */
1008
1009typedef enum {
1010 SVGA3D_TS_INVALID = 0,
1011 SVGA3D_TS_MIN = 1,
1012 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
1013 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
1014 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
1015 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
1016 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
1017 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
1018 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
1019 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
1020 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
1021 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
1022 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
1023 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
1024 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
1025 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
1026 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
1027 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
1028 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
1029 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
1030 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
1031 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
1032 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
1033 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
1034 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
1035 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
1036
1037
1038 /*
1039 * Sampler Gamma Level
1040 *
1041 * Sampler gamma affects the color of samples taken from the sampler. A
1042 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
1043 * gamma value is ignored and a linear space is used.
1044 */
1045
1046 SVGA3D_TS_GAMMA = 25, /* float */
1047 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
1048 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
1049 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
1050 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
1051 SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */
1052 SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */
1053 SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */
1054 SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */
1055 SVGA3D_TS_MAX
1056} SVGA3dTextureStateName;
1057
1058typedef enum {
1059 SVGA3D_TC_INVALID = 0,
1060 SVGA3D_TC_DISABLE = 1,
1061 SVGA3D_TC_SELECTARG1 = 2,
1062 SVGA3D_TC_SELECTARG2 = 3,
1063 SVGA3D_TC_MODULATE = 4,
1064 SVGA3D_TC_ADD = 5,
1065 SVGA3D_TC_ADDSIGNED = 6,
1066 SVGA3D_TC_SUBTRACT = 7,
1067 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
1068 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
1069 SVGA3D_TC_BLENDCURRENTALPHA = 10,
1070 SVGA3D_TC_BLENDFACTORALPHA = 11,
1071 SVGA3D_TC_MODULATE2X = 12,
1072 SVGA3D_TC_MODULATE4X = 13,
1073 SVGA3D_TC_DSDT = 14,
1074 SVGA3D_TC_DOTPRODUCT3 = 15,
1075 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
1076 SVGA3D_TC_ADDSIGNED2X = 17,
1077 SVGA3D_TC_ADDSMOOTH = 18,
1078 SVGA3D_TC_PREMODULATE = 19,
1079 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
1080 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
1081 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
1082 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
1083 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
1084 SVGA3D_TC_MULTIPLYADD = 25,
1085 SVGA3D_TC_LERP = 26,
1086 SVGA3D_TC_MAX
1087} SVGA3dTextureCombiner;
1088
1089#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
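
/*
 * SVGA3D_TC_CAP_BIT maps combiner op N to cap bit N-1, with
 * SVGA3D_TC_INVALID (0) mapping to no bit at all. A usage sketch
 * (editorial); combiner_caps stands in for a device capability dword:
 */
static inline bool combiner_op_supported(uint32 combiner_caps,
					 SVGA3dTextureCombiner op)
{
	return (combiner_caps & SVGA3D_TC_CAP_BIT(op)) != 0;
}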
1090
1091typedef enum {
1092 SVGA3D_TEX_ADDRESS_INVALID = 0,
1093 SVGA3D_TEX_ADDRESS_MIN = 1,
1094 SVGA3D_TEX_ADDRESS_WRAP = 1,
1095 SVGA3D_TEX_ADDRESS_MIRROR = 2,
1096 SVGA3D_TEX_ADDRESS_CLAMP = 3,
1097 SVGA3D_TEX_ADDRESS_BORDER = 4,
1098 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
1099 SVGA3D_TEX_ADDRESS_EDGE = 6,
1100 SVGA3D_TEX_ADDRESS_MAX
1101} SVGA3dTextureAddress;
1102
1103/*
1104 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
1105 * disabled, and the rasterizer should use the magnification filter instead.
1106 */
1107typedef enum {
1108 SVGA3D_TEX_FILTER_NONE = 0,
1109 SVGA3D_TEX_FILTER_MIN = 0,
1110 SVGA3D_TEX_FILTER_NEAREST = 1,
1111 SVGA3D_TEX_FILTER_LINEAR = 2,
1112 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
1113 SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
1114 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
1115 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
1116 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
1117 SVGA3D_TEX_FILTER_MAX
1118} SVGA3dTextureFilter;
1119
1120typedef enum {
1121 SVGA3D_TEX_TRANSFORM_OFF = 0,
1122 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
1123 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
1124 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
1125 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
1126 SVGA3D_TEX_PROJECTED = (1 << 15),
1127} SVGA3dTexTransformFlags;
1128
1129typedef enum {
1130 SVGA3D_TEXCOORD_GEN_OFF = 0,
1131 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
1132 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
1133 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
1134 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
1135 SVGA3D_TEXCOORD_GEN_MAX
1136} SVGA3dTextureCoordGen;
1137
1138/*
1139 * Texture argument constants for texture combiner
1140 */
1141typedef enum {
1142 SVGA3D_TA_INVALID = 0,
1143 SVGA3D_TA_TFACTOR = 1,
1144 SVGA3D_TA_PREVIOUS = 2,
1145 SVGA3D_TA_DIFFUSE = 3,
1146 SVGA3D_TA_TEXTURE = 4,
1147 SVGA3D_TA_SPECULAR = 5,
1148 SVGA3D_TA_CONSTANT = 6,
1149 SVGA3D_TA_MAX
1150} SVGA3dTextureArgData;
1151
1152#define SVGA3D_TM_MASK_LEN 4
1153
1154/* Modifiers for texture argument constants defined above. */
1155typedef enum {
1156 SVGA3D_TM_NONE = 0,
1157 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
1158 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
1159} SVGA3dTextureArgModifier;
1160
1161/*
1162 * Vertex declarations
1163 *
1164 * Notes:
1165 *
1166 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
1167 * draw with any POSITIONT vertex arrays, the programmable vertex
1168 * pipeline will be implicitly disabled. Drawing will take place as if
1169 * no vertex shader was bound.
1170 */
1171
1172typedef enum {
1173 SVGA3D_DECLUSAGE_POSITION = 0,
1174 SVGA3D_DECLUSAGE_BLENDWEIGHT,
1175 SVGA3D_DECLUSAGE_BLENDINDICES,
1176 SVGA3D_DECLUSAGE_NORMAL,
1177 SVGA3D_DECLUSAGE_PSIZE,
1178 SVGA3D_DECLUSAGE_TEXCOORD,
1179 SVGA3D_DECLUSAGE_TANGENT,
1180 SVGA3D_DECLUSAGE_BINORMAL,
1181 SVGA3D_DECLUSAGE_TESSFACTOR,
1182 SVGA3D_DECLUSAGE_POSITIONT,
1183 SVGA3D_DECLUSAGE_COLOR,
1184 SVGA3D_DECLUSAGE_FOG,
1185 SVGA3D_DECLUSAGE_DEPTH,
1186 SVGA3D_DECLUSAGE_SAMPLE,
1187 SVGA3D_DECLUSAGE_MAX
1188} SVGA3dDeclUsage;
1189
1190typedef enum {
1191 SVGA3D_DECLMETHOD_DEFAULT = 0,
1192 SVGA3D_DECLMETHOD_PARTIALU,
1193 SVGA3D_DECLMETHOD_PARTIALV,
1194 SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
1195 SVGA3D_DECLMETHOD_UV,
1196 SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
1197 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
1198 /* map */
1199} SVGA3dDeclMethod;
1200
1201typedef enum {
1202 SVGA3D_DECLTYPE_FLOAT1 = 0,
1203 SVGA3D_DECLTYPE_FLOAT2 = 1,
1204 SVGA3D_DECLTYPE_FLOAT3 = 2,
1205 SVGA3D_DECLTYPE_FLOAT4 = 3,
1206 SVGA3D_DECLTYPE_D3DCOLOR = 4,
1207 SVGA3D_DECLTYPE_UBYTE4 = 5,
1208 SVGA3D_DECLTYPE_SHORT2 = 6,
1209 SVGA3D_DECLTYPE_SHORT4 = 7,
1210 SVGA3D_DECLTYPE_UBYTE4N = 8,
1211 SVGA3D_DECLTYPE_SHORT2N = 9,
1212 SVGA3D_DECLTYPE_SHORT4N = 10,
1213 SVGA3D_DECLTYPE_USHORT2N = 11,
1214 SVGA3D_DECLTYPE_USHORT4N = 12,
1215 SVGA3D_DECLTYPE_UDEC3 = 13,
1216 SVGA3D_DECLTYPE_DEC3N = 14,
1217 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
1218 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
1219 SVGA3D_DECLTYPE_MAX,
1220} SVGA3dDeclType;
1221
1222/*
1223 * This structure is used for the divisor for geometry instancing;
1224 * it's a direct translation of the Direct3D equivalent.
1225 */
1226typedef union {
1227 struct {
1228 /*
1229 * For index data, this number represents the number of instances to draw.
1230 * For instance data, this number represents the number of
1231 * instances/vertex in this stream
1232 */
1233 uint32 count : 30;
1234
1235 /*
1236 * This is 1 if this is supposed to be the data that is repeated for
1237 * every instance.
1238 */
1239 uint32 indexedData : 1;
1240
1241 /*
1242 * This is 1 if this is supposed to be the per-instance data.
1243 */
1244 uint32 instanceData : 1;
1245 };
1246
1247 uint32 value;
1248} SVGA3dVertexDivisor;
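
/*
 * A sketch (editorial) of filling the divisor for a stream holding
 * per-instance data, mirroring the field comments above; a count of
 * one element per instance is illustrative, not required.
 */
static inline SVGA3dVertexDivisor make_per_instance_divisor(void)
{
	SVGA3dVertexDivisor div;

	div.value = 0;          /* clear all bits first */
	div.count = 1;          /* one element per instance in this stream */
	div.instanceData = 1;   /* mark the stream as per-instance data */
	return div;
}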
1249
1250typedef enum {
1251 /*
1252 * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
1253 *
1254 * List MIN second so debuggers will think INVALID is
1255 * the correct name.
1256 */
1257 SVGA3D_PRIMITIVE_INVALID = 0,
1258 SVGA3D_PRIMITIVE_MIN = 0,
1259 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
1260 SVGA3D_PRIMITIVE_POINTLIST = 2,
1261 SVGA3D_PRIMITIVE_LINELIST = 3,
1262 SVGA3D_PRIMITIVE_LINESTRIP = 4,
1263 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
1264 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
1265 SVGA3D_PRIMITIVE_LINELIST_ADJ = 7,
1266 SVGA3D_PRIMITIVE_PREDX_MAX = 7,
1267 SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
1268 SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
1269 SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
1270 SVGA3D_PRIMITIVE_MAX
1271} SVGA3dPrimitiveType;
1272
1273typedef enum {
1274 SVGA3D_COORDINATE_INVALID = 0,
1275 SVGA3D_COORDINATE_LEFTHANDED = 1,
1276 SVGA3D_COORDINATE_RIGHTHANDED = 2,
1277 SVGA3D_COORDINATE_MAX
1278} SVGA3dCoordinateType;
1279
1280typedef enum {
1281 SVGA3D_TRANSFORM_INVALID = 0,
1282 SVGA3D_TRANSFORM_WORLD = 1,
1283 SVGA3D_TRANSFORM_MIN = 1,
1284 SVGA3D_TRANSFORM_VIEW = 2,
1285 SVGA3D_TRANSFORM_PROJECTION = 3,
1286 SVGA3D_TRANSFORM_TEXTURE0 = 4,
1287 SVGA3D_TRANSFORM_TEXTURE1 = 5,
1288 SVGA3D_TRANSFORM_TEXTURE2 = 6,
1289 SVGA3D_TRANSFORM_TEXTURE3 = 7,
1290 SVGA3D_TRANSFORM_TEXTURE4 = 8,
1291 SVGA3D_TRANSFORM_TEXTURE5 = 9,
1292 SVGA3D_TRANSFORM_TEXTURE6 = 10,
1293 SVGA3D_TRANSFORM_TEXTURE7 = 11,
1294 SVGA3D_TRANSFORM_WORLD1 = 12,
1295 SVGA3D_TRANSFORM_WORLD2 = 13,
1296 SVGA3D_TRANSFORM_WORLD3 = 14,
1297 SVGA3D_TRANSFORM_MAX
1298} SVGA3dTransformType;
1299
1300typedef enum {
1301 SVGA3D_LIGHTTYPE_INVALID = 0,
1302 SVGA3D_LIGHTTYPE_MIN = 1,
1303 SVGA3D_LIGHTTYPE_POINT = 1,
1304 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
1305 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
1306 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
1307 SVGA3D_LIGHTTYPE_MAX
1308} SVGA3dLightType;
1309
1310typedef enum {
1311 SVGA3D_CUBEFACE_POSX = 0,
1312 SVGA3D_CUBEFACE_NEGX = 1,
1313 SVGA3D_CUBEFACE_POSY = 2,
1314 SVGA3D_CUBEFACE_NEGY = 3,
1315 SVGA3D_CUBEFACE_POSZ = 4,
1316 SVGA3D_CUBEFACE_NEGZ = 5,
1317} SVGA3dCubeFace;
1318
1319typedef enum {
1320 SVGA3D_SHADERTYPE_INVALID = 0,
1321 SVGA3D_SHADERTYPE_MIN = 1,
1322 SVGA3D_SHADERTYPE_VS = 1,
1323 SVGA3D_SHADERTYPE_PS = 2,
1324 SVGA3D_SHADERTYPE_PREDX_MAX = 3,
1325 SVGA3D_SHADERTYPE_GS = 3,
1326 SVGA3D_SHADERTYPE_DX10_MAX = 4,
1327 SVGA3D_SHADERTYPE_HS = 4,
1328 SVGA3D_SHADERTYPE_DS = 5,
1329 SVGA3D_SHADERTYPE_CS = 6,
1330 SVGA3D_SHADERTYPE_MAX = 7
1331} SVGA3dShaderType;
1332
1333#define SVGA3D_NUM_SHADERTYPE_PREDX \
1334 (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
1335
1336#define SVGA3D_NUM_SHADERTYPE_DX10 \
1337 (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
1338
1339#define SVGA3D_NUM_SHADERTYPE \
1340 (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
1341
1342typedef enum {
1343 SVGA3D_CONST_TYPE_MIN = 0,
1344 SVGA3D_CONST_TYPE_FLOAT = 0,
1345 SVGA3D_CONST_TYPE_INT = 1,
1346 SVGA3D_CONST_TYPE_BOOL = 2,
1347 SVGA3D_CONST_TYPE_MAX = 3,
1348} SVGA3dShaderConstType;
1349
1350/*
1351 * Register limits for shader consts.
1352 */
1353#define SVGA3D_CONSTREG_MAX 256
1354#define SVGA3D_CONSTINTREG_MAX 16
1355#define SVGA3D_CONSTBOOLREG_MAX 16
1356
1357typedef enum {
1358 SVGA3D_STRETCH_BLT_POINT = 0,
1359 SVGA3D_STRETCH_BLT_LINEAR = 1,
1360 SVGA3D_STRETCH_BLT_MAX
1361} SVGA3dStretchBltMode;
1362
1363typedef enum {
1364 SVGA3D_QUERYTYPE_INVALID = ((uint8)-1),
1365 SVGA3D_QUERYTYPE_MIN = 0,
1366 SVGA3D_QUERYTYPE_OCCLUSION = 0,
1367 SVGA3D_QUERYTYPE_TIMESTAMP = 1,
1368 SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2,
1369 SVGA3D_QUERYTYPE_PIPELINESTATS = 3,
1370 SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4,
1371 SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
1372 SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
1373 SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
1374 SVGA3D_QUERYTYPE_EVENT = 8,
1375 SVGA3D_QUERYTYPE_DX10_MAX = 9,
1376 SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
1377 SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
1378 SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
1379 SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
1380 SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
1381 SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
1382 SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
1383 SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
1384 SVGA3D_QUERYTYPE_MAX
1385} SVGA3dQueryType;
1386
1387typedef uint8 SVGA3dQueryTypeUint8;
1388
1389#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
1390
1391/*
1392 * This is the maximum number of queries per context that can be active
1393 * simultaneously between a beginQuery and endQuery.
1394 */
1395#define SVGA3D_MAX_QUERY 64
1396
1397/*
1398 * Query result buffer formats
1399 */
1400typedef
1401#include "vmware_pack_begin.h"
1402struct {
1403 uint32 samplesRendered;
1404}
1405#include "vmware_pack_end.h"
1406SVGADXOcclusionQueryResult;
1407
1408typedef
1409#include "vmware_pack_begin.h"
1410struct {
1411 uint32 passed;
1412}
1413#include "vmware_pack_end.h"
1414SVGADXEventQueryResult;
1415
1416typedef
1417#include "vmware_pack_begin.h"
1418struct {
1419 uint64 timestamp;
1420}
1421#include "vmware_pack_end.h"
1422SVGADXTimestampQueryResult;
1423
1424typedef
1425#include "vmware_pack_begin.h"
1426struct {
1427 uint64 realFrequency;
1428 uint32 disjoint;
1429}
1430#include "vmware_pack_end.h"
1431SVGADXTimestampDisjointQueryResult;
1432
1433typedef
1434#include "vmware_pack_begin.h"
1435struct {
1436 uint64 inputAssemblyVertices;
1437 uint64 inputAssemblyPrimitives;
1438 uint64 vertexShaderInvocations;
1439 uint64 geometryShaderInvocations;
1440 uint64 geometryShaderPrimitives;
1441 uint64 clipperInvocations;
1442 uint64 clipperPrimitives;
1443 uint64 pixelShaderInvocations;
1444 uint64 hullShaderInvocations;
1445 uint64 domainShaderInvocations;
1446 uint64 computeShaderInvocations;
1447}
1448#include "vmware_pack_end.h"
1449SVGADXPipelineStatisticsQueryResult;
1450
1451typedef
1452#include "vmware_pack_begin.h"
1453struct {
1454 uint32 anySamplesRendered;
1455}
1456#include "vmware_pack_end.h"
1457SVGADXOcclusionPredicateQueryResult;
1458
1459typedef
1460#include "vmware_pack_begin.h"
1461struct {
1462 uint64 numPrimitivesWritten;
1463 uint64 numPrimitivesRequired;
1464}
1465#include "vmware_pack_end.h"
1466SVGADXStreamOutStatisticsQueryResult;
1467
1468typedef
1469#include "vmware_pack_begin.h"
1470struct {
1471 uint32 overflowed;
1472}
1473#include "vmware_pack_end.h"
1474SVGADXStreamOutPredicateQueryResult;
1475
1476typedef
1477#include "vmware_pack_begin.h"
1478struct {
1479 uint64 samplesRendered;
1480}
1481#include "vmware_pack_end.h"
1482SVGADXOcclusion64QueryResult;
1483
1484/*
1485 * SVGADXQueryResultUnion is not intended for use in the protocol, but is
1486 * very helpful when working with queries generically.
1487 */
1488typedef
1489#include "vmware_pack_begin.h"
1490union SVGADXQueryResultUnion {
1491 SVGADXOcclusionQueryResult occ;
1492 SVGADXEventQueryResult event;
1493 SVGADXTimestampQueryResult ts;
1494 SVGADXTimestampDisjointQueryResult tsDisjoint;
1495 SVGADXPipelineStatisticsQueryResult pipelineStats;
1496 SVGADXOcclusionPredicateQueryResult occPred;
1497 SVGADXStreamOutStatisticsQueryResult soStats;
1498 SVGADXStreamOutPredicateQueryResult soPred;
1499 SVGADXOcclusion64QueryResult occ64;
1500}
1501#include "vmware_pack_end.h"
1502SVGADXQueryResultUnion;
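
/*
 * A sketch (editorial) of the generic handling the comment above
 * alludes to: one dispatch on the query type, shown for the occlusion
 * variants only.
 */
static inline uint64 samples_from_result(const SVGADXQueryResultUnion *res,
					 SVGA3dQueryType type)
{
	switch (type) {
	case SVGA3D_QUERYTYPE_OCCLUSION:
		return res->occ.samplesRendered;
	case SVGA3D_QUERYTYPE_OCCLUSION64:
		return res->occ64.samplesRendered;
	default:
		return 0; /* other result layouts omitted in this sketch */
	}
}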
1503
1504
1505typedef enum {
1506 SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
1507 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
1508 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */
1509 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */
1510} SVGA3dQueryState;
1511
1512typedef enum {
1513 SVGA3D_WRITE_HOST_VRAM = 1,
1514 SVGA3D_READ_HOST_VRAM = 2,
1515} SVGA3dTransferType;
1516
1517typedef enum {
1518 SVGA3D_LOGICOP_INVALID = 0,
1519 SVGA3D_LOGICOP_MIN = 1,
1520 SVGA3D_LOGICOP_COPY = 1,
1521 SVGA3D_LOGICOP_NOT = 2,
1522 SVGA3D_LOGICOP_AND = 3,
1523 SVGA3D_LOGICOP_OR = 4,
1524 SVGA3D_LOGICOP_XOR = 5,
1525 SVGA3D_LOGICOP_NXOR = 6,
1526 SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
1527 SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
1528 SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
1529} SVGA3dLogicOp;
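
/*
 * Editorial sketch: given the reserved gap, the 256 classic ROP3 codes
 * appear to map linearly onto [SVGA3D_LOGICOP_ROP3MIN,
 * SVGA3D_LOGICOP_ROP3MAX]. This linear encoding is an assumption, not
 * stated by the header; rop3 is the raw 8-bit code (e.g. 0xcc for SRCCOPY).
 */
static inline SVGA3dLogicOp logicop_from_rop3(uint8 rop3)
{
	return (SVGA3dLogicOp)(SVGA3D_LOGICOP_ROP3MIN + rop3);
}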
1530
1531typedef
1532#include "vmware_pack_begin.h"
1533struct {
1534 union {
1535 struct {
1536 uint16 function; /* SVGA3dFogFunction */
1537 uint8 type; /* SVGA3dFogType */
1538 uint8 base; /* SVGA3dFogBase */
1539 };
1540 uint32 uintValue;
1541 };
1542}
1543#include "vmware_pack_end.h"
1544SVGA3dFogMode;
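
/*
 * A sketch (editorial) of packing the three fog enums above into the
 * single dword expected by the SVGA3D_RS_FOGMODE render state.
 */
static inline uint32 linear_pixel_fog_mode(void)
{
	SVGA3dFogMode mode;

	mode.uintValue = 0;
	mode.function = SVGA3D_FOGFUNC_LINEAR;
	mode.type = SVGA3D_FOGTYPE_PIXEL;
	mode.base = SVGA3D_FOGBASE_DEPTHBASED;
	return mode.uintValue;
}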
1545
1546/*
1547 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1548 * is a surface ID as well as face/mipmap indices.
1549 */
1550
1551typedef
1552#include "vmware_pack_begin.h"
1553struct SVGA3dSurfaceImageId {
1554 uint32 sid;
1555 uint32 face;
1556 uint32 mipmap;
1557}
1558#include "vmware_pack_end.h"
1559SVGA3dSurfaceImageId;
1560
1561typedef
1562#include "vmware_pack_begin.h"
1563struct {
1564 uint32 width;
1565 uint32 height;
1566 uint32 depth;
1567}
1568#include "vmware_pack_end.h"
1569SVGA3dSize;
1570
1571/*
1572 * Guest-backed objects definitions.
1573 */
1574typedef enum {
1575 SVGA_OTABLE_MOB = 0,
1576 SVGA_OTABLE_MIN = 0,
1577 SVGA_OTABLE_SURFACE = 1,
1578 SVGA_OTABLE_CONTEXT = 2,
1579 SVGA_OTABLE_SHADER = 3,
1580 SVGA_OTABLE_SCREENTARGET = 4,
1581
1582 SVGA_OTABLE_DX9_MAX = 5,
1583
1584 SVGA_OTABLE_DXCONTEXT = 5,
1585 SVGA_OTABLE_MAX = 6
1586} SVGAOTableType;
1587
1588/*
1589 * Deprecated.
1590 */
1591#define SVGA_OTABLE_COUNT 4
1592
1593typedef enum {
1594 SVGA_COTABLE_MIN = 0,
1595 SVGA_COTABLE_RTVIEW = 0,
1596 SVGA_COTABLE_DSVIEW = 1,
1597 SVGA_COTABLE_SRVIEW = 2,
1598 SVGA_COTABLE_ELEMENTLAYOUT = 3,
1599 SVGA_COTABLE_BLENDSTATE = 4,
1600 SVGA_COTABLE_DEPTHSTENCIL = 5,
1601 SVGA_COTABLE_RASTERIZERSTATE = 6,
1602 SVGA_COTABLE_SAMPLER = 7,
1603 SVGA_COTABLE_STREAMOUTPUT = 8,
1604 SVGA_COTABLE_DXQUERY = 9,
1605 SVGA_COTABLE_DXSHADER = 10,
1606 SVGA_COTABLE_DX10_MAX = 11,
1607 SVGA_COTABLE_UAVIEW = 11,
1608 SVGA_COTABLE_MAX
1609} SVGACOTableType;
1610
1611/*
1612 * The largest size (number of entries) allowed in a COTable.
1613 */
1614#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
1615
1616typedef enum SVGAMobFormat {
1617 SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
1618 SVGA3D_MOBFMT_PTDEPTH_0 = 0,
1619 SVGA3D_MOBFMT_MIN = 0,
1620 SVGA3D_MOBFMT_PTDEPTH_1 = 1,
1621 SVGA3D_MOBFMT_PTDEPTH_2 = 2,
1622 SVGA3D_MOBFMT_RANGE = 3,
1623 SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
1624 SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
1625 SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
1626 SVGA3D_MOBFMT_PREDX_MAX = 7,
1627 SVGA3D_MOBFMT_EMPTY = 7,
1628 SVGA3D_MOBFMT_MAX,
1629} SVGAMobFormat;
1630
1631#define SVGA3D_MOB_EMPTY_BASE 1
1632
1633#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 8e8d9682e018..884b1d1fb85f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index f38416fcb046..faf6d9b2b891 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
    switch (format) {
    case VMWARE_FOURCC_YV12:
       *height = (*height + 1) & ~1;
-      *size = (*width + 3) & ~3;
+      *size = (*width) * (*height);

       if (pitches) {
-         pitches[0] = *size;
+         pitches[0] = *width;
       }

-      *size *= *height;
-
       if (offsets) {
          offsets[1] = *size;
       }

-      tmp = ((*width >> 1) + 3) & ~3;
+      tmp = *width >> 1;

       if (pitches) {
          pitches[1] = pitches[2] = tmp;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index e4259c2c1acc..6e0ccb70a700 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -31,20 +31,38 @@
 
 #ifndef _SVGA_REG_H_
 #define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
 
 /*
- * PCI device IDs.
+ * SVGA_REG_ENABLE bit definitions.
  */
-#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+typedef enum {
+   SVGA_REG_ENABLE_DISABLE = 0,
+   SVGA_REG_ENABLE_ENABLE = (1 << 0),
+   SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
 
 /*
- * SVGA_REG_ENABLE bit definitions.
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
  */
-#define SVGA_REG_ENABLE_DISABLE 0
-#define SVGA_REG_ENABLE_ENABLE 1
-#define SVGA_REG_ENABLE_HIDE 2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
-                                     SVGA_REG_ENABLE_HIDE)
+#define SVGA_MAX_WIDTH 2560
+#define SVGA_MAX_HEIGHT 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL 32
+#define SVGA_MAX_DEPTH 24
+#define SVGA_MAX_DISPLAYS 10
 
 /*
  * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
@@ -57,14 +75,9 @@
 #define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
 
 /*
- * The maximum framebuffer size that can traced for e.g. guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- * See bug 335072.
+ * The maximum framebuffer size that can traced for guests unless the
+ * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case
+ * the full framebuffer can be traced independent of this limit.
  */
 #define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
 
@@ -106,6 +119,8 @@
 #define SVGA_IRQFLAG_ANY_FENCE     0x1  /* Any fence was passed */
 #define SVGA_IRQFLAG_FIFO_PROGRESS 0x2  /* Made forward progress in the FIFO */
 #define SVGA_IRQFLAG_FENCE_GOAL    0x4  /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR         0x10 /* Error while processing commands */
 
 /*
  * Registers
@@ -131,6 +146,7 @@ enum {
    SVGA_REG_FB_SIZE = 16,
 
    /* ID 0 implementation only had the above registers, then the palette */
+   SVGA_REG_ID_0_TOP = 17,
 
    SVGA_REG_CAPABILITIES = 17,
    SVGA_REG_MEM_START = 18,           /* (Deprecated) */
@@ -171,7 +187,7 @@ enum {
    SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
    SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
    SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
-   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */
    SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
    SVGA_REG_CMD_PREPEND_LOW = 53,
    SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -182,7 +198,6 @@ enum {
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
-
    SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
                        /* Base of scratch registers */
    /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
@@ -190,7 +205,6 @@ enum {
       the use of the current SVGA driver. */
 };
 
-
 /*
  * Guest memory regions (GMRs):
  *
@@ -288,17 +302,205 @@ enum {
 #define SVGA_GMR_FRAMEBUFFER  ((uint32) -2) /* Guest Framebuffer (GFB) */
 
 typedef
+#include "vmware_pack_begin.h"
 struct SVGAGuestMemDescriptor {
    uint32 ppn;
    uint32 numPages;
-} SVGAGuestMemDescriptor;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
 
 typedef
+#include "vmware_pack_begin.h"
 struct SVGAGuestPtr {
    uint32 gmrId;
    uint32 offset;
-} SVGAGuestPtr;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
+
322/*
323 * Register based command buffers --
324 *
325 * Provide an SVGA device interface that allows the guest to submit
326 * command buffers to the SVGA device through an SVGA device register.
327 * The metadata for each command buffer is contained in the
328 * SVGACBHeader structure along with the return status codes.
329 *
330 * The SVGA device supports command buffers if
331 * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The
332 * FIFO must be enabled for command buffers to be submitted.
333 *
334 * Command buffers are submitted when the guest writes the 64 byte
335 * aligned physical address into SVGA_REG_COMMAND_LOW and
336 * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32
337 * bits of the physical address. SVGA_REG_COMMAND_LOW contains the
338 * lower 32 bits of the physical address; since command buffer
339 * headers are required to be 64 byte aligned, the lower 6 bits are
340 * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW
341 * submits the command buffer to the device and queues it for
342 * execution. The SVGA device supports at least
343 * SVGA_CB_MAX_QUEUED_PER_CONTEXT queued command buffers per context;
344 * if that limit is reached, the device synchronously writes the
345 * status SVGA_CB_STATUS_QUEUE_FULL to the status field of the command
346 * buffer header and does not raise any IRQs.
347 *
348 * It is invalid to submit a command buffer without a valid physical
349 * address, and results are undefined.
350 *
351 * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
352 * will be supported. If a larger command buffer is submitted, results
353 * are unspecified and the device will either complete the command
354 * buffer or return an error.
355 *
356 * The device guarantees that any individual command in a command
357 * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size, which is
358 * enough to fit a 64x64 color-cursor definition. If the command is
359 * too large the device is allowed to process the command or return an
360 * error.
361 *
362 * The device context is a special SVGACBContext that allows for
363 * synchronous register-like accesses with the flexibility of
364 * commands. It has a separate command set defined by
365 * SVGADeviceContextCmdId. The commands in each command buffer are not
366 * allowed to straddle physical pages.
367 *
368 * The offset field, which is available starting with the
369 * SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
370 * start of command processing into the buffer. If an error is
371 * encountered, the errorOffset will still be relative to the specific
372 * PA, not biased by the offset. When the command buffer is finished,
373 * the guest should not read the offset field as there is no guarantee
374 * what it will be set to.
375 */
376
377#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
378#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
379#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
380
381#define SVGA_CB_CONTEXT_MASK 0x3f
382typedef enum {
383 SVGA_CB_CONTEXT_DEVICE = 0x3f,
384 SVGA_CB_CONTEXT_0 = 0x0,
385 SVGA_CB_CONTEXT_MAX = 0x1,
386} SVGACBContext;
387
388
389typedef enum {
390 /*
391 * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
392 * field before submitting the command buffer header; the host will
393 * change the value when it is done with the command buffer.
394 */
395 SVGA_CB_STATUS_NONE = 0,
396
397 /*
398 * Written by the host when a command buffer completes successfully.
399 * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
400 * the SVGA_CB_FLAG_NO_IRQ flag is set.
401 */
402 SVGA_CB_STATUS_COMPLETED = 1,
403
404 /*
405 * Written by the host synchronously with the command buffer
406 * submission to indicate the command buffer was not submitted. No
407 * IRQ is raised.
408 */
409 SVGA_CB_STATUS_QUEUE_FULL = 2,
410
411 /*
412 * Written by the host when an error was detected parsing a command
413 * in the command buffer, errorOffset is written to contain the
414 * offset to the first byte of the failing command. The device
415 * raises the IRQ with both SVGA_IRQFLAG_ERROR and
416 * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been
417 * processed.
418 */
419 SVGA_CB_STATUS_COMMAND_ERROR = 3,
420
421 /*
422 * Written by the host if there is an error parsing the command
423 * buffer header. The device raises the IRQ with both
424 * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device
425 * did not process any of the command buffer.
426 */
427 SVGA_CB_STATUS_CB_HEADER_ERROR = 4,
301 428
429 /*
430 * Written by the host if the guest requested that the host preempt
431 * the command buffer. The device will not raise any IRQs and the
432 * command buffer is not processed.
433 */
434 SVGA_CB_STATUS_PREEMPTED = 5,
435
436 /*
437 * Written by the host synchronously with the command buffer
438 * submission to indicate that the command buffer was not submitted
439 * due to an error. No IRQ is raised.
440 */
441 SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
442} SVGACBStatus;
443
444typedef enum {
445 SVGA_CB_FLAG_NONE = 0,
446 SVGA_CB_FLAG_NO_IRQ = 1 << 0,
447 SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
448 SVGA_CB_FLAG_MOB = 1 << 2,
449} SVGACBFlags;
450
451typedef
452#include "vmware_pack_begin.h"
453struct {
454 volatile SVGACBStatus status;
455 volatile uint32 errorOffset;
456 uint64 id;
457 SVGACBFlags flags;
458 uint32 length;
459 union {
460 PA pa;
461 struct {
462 SVGAMobId mobid;
463 uint32 mobOffset;
464 } mob;
465 } ptr;
466 uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
467 uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
468 uint32 mustBeZero[6];
469}
470#include "vmware_pack_end.h"
471SVGACBHeader;
472
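
As an illustration of the submission sequence described above, a minimal
sketch follows; vmw_write_reg() is a hypothetical register accessor, and
the header is assumed to sit in 64 byte aligned, physically contiguous
memory:

extern void vmw_write_reg(uint32 reg, uint32 value);  /* hypothetical */

static void example_submit_cb(SVGACBHeader *header, PA headerPA,
                              SVGACBContext context)
{
        /* The guest writes SVGA_CB_STATUS_NONE before submitting. */
        header->status = SVGA_CB_STATUS_NONE;

        /* Upper 32 bits of the header's physical address. */
        vmw_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPA >> 32));

        /*
         * The header is 64 byte aligned, so the low 6 bits of the
         * address are free to carry the SVGACBContext value; this
         * write also queues the buffer for execution.
         */
        vmw_write_reg(SVGA_REG_COMMAND_LOW,
                      ((uint32)headerPA & ~(uint32)SVGA_CB_CONTEXT_MASK) |
                      (uint32)context);
}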
473typedef enum {
474 SVGA_DC_CMD_NOP = 0,
475 SVGA_DC_CMD_START_STOP_CONTEXT = 1,
476 SVGA_DC_CMD_PREEMPT = 2,
477 SVGA_DC_CMD_MAX = 3,
478 SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
479} SVGADeviceContextCmdId;
480
481typedef struct {
482 uint32 enable;
483 SVGACBContext context;
484} SVGADCCmdStartStop;
485
486/*
487 * SVGADCCmdPreempt --
488 *
489 * This command allows the guest to request that all command buffers
490 * on the specified context that can be preempted are preempted. After
491 * execution of this command, all command buffers that were preempted
492 * will have SVGA_CB_STATUS_PREEMPTED written into the status field.
493 * The device might still be processing a command buffer if execution
494 * of it started before the preemption request was received. Setting
495 * the ignoreIDZero flag to TRUE causes the device not to preempt
496 * command buffers whose id field in the command buffer header is set
497 * to zero.
498 */
499
500typedef struct {
501 SVGACBContext context;
502 uint32 ignoreIDZero;
503} SVGADCCmdPreempt;
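
A sketch of what a preempt request on the device context might look like;
the wire layout (the SVGADeviceContextCmdId immediately followed by its
argument struct) and the caller-provided buffer are assumptions here:

static uint32 example_encode_preempt(void *body, SVGACBContext context)
{
        struct {
                uint32 cmdId;            /* SVGADeviceContextCmdId */
                SVGADCCmdPreempt cmd;
        } *p = body;

        p->cmdId            = SVGA_DC_CMD_PREEMPT;
        p->cmd.context      = context;
        p->cmd.ignoreIDZero = 0;         /* Also preempt id == 0 buffers. */
        return sizeof(*p);               /* Bytes written into the buffer. */
}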
302 504
303/* 505/*
304 * SVGAGMRImageFormat -- 506 * SVGAGMRImageFormat --
@@ -320,13 +522,12 @@ struct SVGAGuestPtr {
320 * 522 *
321 */ 523 */
322 524
323typedef 525typedef struct SVGAGMRImageFormat {
324struct SVGAGMRImageFormat {
325 union { 526 union {
326 struct { 527 struct {
327 uint32 bitsPerPixel : 8; 528 uint32 bitsPerPixel : 8;
328 uint32 colorDepth : 8; 529 uint32 colorDepth : 8;
329 uint32 reserved : 16; /* Must be zero */ 530 uint32 reserved : 16; /* Must be zero */
330 }; 531 };
331 532
332 uint32 value; 533 uint32 value;
@@ -334,6 +535,7 @@ struct SVGAGMRImageFormat {
334} SVGAGMRImageFormat; 535} SVGAGMRImageFormat;
335 536
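
For example, a 32 bpp, 24-bit color depth GMRFB format can be set up
through the anonymous bitfield; a small sketch using C11 designated
initialization, which leaves the reserved bits zero:

SVGAGMRImageFormat fmt = {
        .bitsPerPixel = 32,
        .colorDepth   = 24,   /* reserved stays zero, as required. */
};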
336typedef 537typedef
538#include "vmware_pack_begin.h"
337struct SVGAGuestImage { 539struct SVGAGuestImage {
338 SVGAGuestPtr ptr; 540 SVGAGuestPtr ptr;
339 541
@@ -353,7 +555,9 @@ struct SVGAGuestImage {
353 * assuming each row of blocks is tightly packed. 555 * assuming each row of blocks is tightly packed.
354 */ 556 */
355 uint32 pitch; 557 uint32 pitch;
356} SVGAGuestImage; 558}
559#include "vmware_pack_end.h"
560SVGAGuestImage;
357 561
358/* 562/*
359 * SVGAColorBGRX -- 563 * SVGAColorBGRX --
@@ -363,14 +567,13 @@ struct SVGAGuestImage {
363 * GMRFB state. 567 * GMRFB state.
364 */ 568 */
365 569
366typedef 570typedef struct SVGAColorBGRX {
367struct SVGAColorBGRX {
368 union { 571 union {
369 struct { 572 struct {
370 uint32 b : 8; 573 uint32 b : 8;
371 uint32 g : 8; 574 uint32 g : 8;
372 uint32 r : 8; 575 uint32 r : 8;
373 uint32 x : 8; /* Unused */ 576 uint32 x : 8; /* Unused */
374 }; 577 };
375 578
376 uint32 value; 579 uint32 value;
@@ -392,26 +595,49 @@ struct SVGAColorBGRX {
392 */ 595 */
393 596
394typedef 597typedef
395struct SVGASignedRect { 598#include "vmware_pack_begin.h"
599struct {
396 int32 left; 600 int32 left;
397 int32 top; 601 int32 top;
398 int32 right; 602 int32 right;
399 int32 bottom; 603 int32 bottom;
400} SVGASignedRect; 604}
605#include "vmware_pack_end.h"
606SVGASignedRect;
401 607
402typedef 608typedef
403struct SVGASignedPoint { 609#include "vmware_pack_begin.h"
610struct {
404 int32 x; 611 int32 x;
405 int32 y; 612 int32 y;
406} SVGASignedPoint; 613}
614#include "vmware_pack_end.h"
615SVGASignedPoint;
407 616
408 617
409/* 618/*
410 * Capabilities 619 * SVGA Device Capabilities
620 *
621 * Note the holes in the bitfield. Missing bits have been deprecated,
622 * and must not be reused. Those capabilities will never be reported
623 * by new versions of the SVGA device.
624 *
625 * XXX: Add longer descriptions for each capability, including a list
626 * of the new features that each capability provides.
411 * 627 *
412 * Note the holes in the bitfield. Missing bits have been deprecated, 628 * SVGA_CAP_IRQMASK --
413 * and must not be reused. Those capabilities will never be reported 629 * Provides device interrupts. Adds device register SVGA_REG_IRQMASK
414 * by new versions of the SVGA device. 630 * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
631 * set/clear pending interrupts.
632 *
633 * SVGA_CAP_GMR --
634 * Provides synchronous mapping of guest memory regions (GMR).
635 * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
636 * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
637 *
638 * SVGA_CAP_TRACES --
639 * Allows framebuffer trace-based updates even when FIFO is enabled.
640 * Adds device register SVGA_REG_TRACES.
415 * 641 *
416 * SVGA_CAP_GMR2 -- 642 * SVGA_CAP_GMR2 --
417 * Provides asynchronous commands to define and remap guest memory 643 * Provides asynchronous commands to define and remap guest memory
@@ -421,21 +647,39 @@ struct SVGASignedPoint {
421 * SVGA_CAP_SCREEN_OBJECT_2 -- 647 * SVGA_CAP_SCREEN_OBJECT_2 --
422 * Allow screen object support, and require backing stores from the 648 * Allow screen object support, and require backing stores from the
423 * guest for each screen object. 649 * guest for each screen object.
650 *
651 * SVGA_CAP_COMMAND_BUFFERS --
652 * Enable register based command buffer submission.
653 *
654 * SVGA_CAP_DEAD1 --
655 * This cap was incorrectly used by old drivers and should not be
656 * reused.
657 *
658 * SVGA_CAP_CMD_BUFFERS_2 --
659 * Enable support for the prepend command buffer submission
660 * registers, SVGA_REG_CMD_PREPEND_LOW and
661 * SVGA_REG_CMD_PREPEND_HIGH.
662 *
663 * SVGA_CAP_GBOBJECTS --
664 * Enable guest-backed objects and surfaces.
665 *
666 * SVGA_CAP_CMD_BUFFERS_3 --
667 * Enable support for command buffers in a mob.
424 */ 668 */
425 669
426#define SVGA_CAP_NONE 0x00000000 670#define SVGA_CAP_NONE 0x00000000
427#define SVGA_CAP_RECT_COPY 0x00000002 671#define SVGA_CAP_RECT_COPY 0x00000002
428#define SVGA_CAP_CURSOR 0x00000020 672#define SVGA_CAP_CURSOR 0x00000020
429#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */ 673#define SVGA_CAP_CURSOR_BYPASS 0x00000040
430#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */ 674#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080
431#define SVGA_CAP_8BIT_EMULATION 0x00000100 675#define SVGA_CAP_8BIT_EMULATION 0x00000100
432#define SVGA_CAP_ALPHA_CURSOR 0x00000200 676#define SVGA_CAP_ALPHA_CURSOR 0x00000200
433#define SVGA_CAP_3D 0x00004000 677#define SVGA_CAP_3D 0x00004000
434#define SVGA_CAP_EXTENDED_FIFO 0x00008000 678#define SVGA_CAP_EXTENDED_FIFO 0x00008000
435#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */ 679#define SVGA_CAP_MULTIMON 0x00010000
436#define SVGA_CAP_PITCHLOCK 0x00020000 680#define SVGA_CAP_PITCHLOCK 0x00020000
437#define SVGA_CAP_IRQMASK 0x00040000 681#define SVGA_CAP_IRQMASK 0x00040000
438#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */ 682#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000
439#define SVGA_CAP_GMR 0x00100000 683#define SVGA_CAP_GMR 0x00100000
440#define SVGA_CAP_TRACES 0x00200000 684#define SVGA_CAP_TRACES 0x00200000
441#define SVGA_CAP_GMR2 0x00400000 685#define SVGA_CAP_GMR2 0x00400000
@@ -444,6 +688,33 @@ struct SVGASignedPoint {
444#define SVGA_CAP_DEAD1 0x02000000 688#define SVGA_CAP_DEAD1 0x02000000
445#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 689#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
446#define SVGA_CAP_GBOBJECTS 0x08000000 690#define SVGA_CAP_GBOBJECTS 0x08000000
691#define SVGA_CAP_DX 0x10000000
692
693#define SVGA_CAP_CMD_RESERVED 0x80000000
694
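
For instance, a driver would gate the register-based submission path on
the capability bits above; a minimal sketch assuming a hypothetical
vmw_read_reg() accessor:

extern uint32 vmw_read_reg(uint32 reg);  /* hypothetical */

static Bool example_has_cmd_buffers(void)
{
        uint32 caps = vmw_read_reg(SVGA_REG_CAPABILITIES);

        /* Note: the FIFO must additionally be enabled before submitting. */
        return (caps & SVGA_CAP_COMMAND_BUFFERS) != 0;
}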
695
696/*
697 * The guest can optionally read some SVGA device capabilities through
698 * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
699 * the SVGA device is initialized. The type of capability the guest
700 * is requesting from the SVGABackdoorCapType enum should be placed in
701 * the upper 16 bits of the backdoor command id (ECX). On success,
702 * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
703 * the requested capability. If the command is not supported, then EBX
704 * will be left unchanged and EAX will be set to -1. Because it is
705 * possible that -1 is the value of the requested cap, the correct way
706 * to check whether the command was successful is to check whether EBX
707 * was changed to BDOOR_MAGIC, making sure to initialize the register
708 * to something else first.
709 */
710
711typedef enum {
712 SVGABackdoorCapDeviceCaps = 0,
713 SVGABackdoorCapFifoCaps = 1,
714 SVGABackdoorCap3dHWVersion = 2,
715 SVGABackdoorCapMax = 3,
716} SVGABackdoorCapType;
717
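
A sketch of the EBX-based success check described above; backdoor_call()
is a hypothetical helper that issues the backdoor call and returns the
resulting registers, and the BDOOR_* constants are assumed to come from
the VMware backdoor headers (not shown here):

extern void backdoor_call(uint32 *eax, uint32 *ebx, uint32 ecx);

static Bool example_backdoor_cap(SVGABackdoorCapType type, uint32 *out)
{
        uint32 eax, ebx, ecx;

        /* The cap type goes into the upper 16 bits of the command id. */
        ecx = BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16);
        ebx = 0;    /* Anything other than BDOOR_MAGIC. */

        backdoor_call(&eax, &ebx, ecx);

        /* EBX changing to BDOOR_MAGIC is the only reliable success test. */
        if (ebx != BDOOR_MAGIC)
                return false;

        *out = eax;
        return true;
}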
447 718
448/* 719/*
449 * FIFO register indices. 720 * FIFO register indices.
@@ -883,7 +1154,8 @@ enum {
883 SVGA_VIDEO_PITCH_2, 1154 SVGA_VIDEO_PITCH_2,
884 SVGA_VIDEO_PITCH_3, 1155 SVGA_VIDEO_PITCH_3,
885 SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */ 1156 SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
886 SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */ 1157 SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
1158 /* (SVGA_ID_INVALID) */
887 SVGA_VIDEO_NUM_REGS 1159 SVGA_VIDEO_NUM_REGS
888}; 1160};
889 1161
@@ -896,7 +1168,9 @@ enum {
896 * video frame to be displayed. 1168 * video frame to be displayed.
897 */ 1169 */
898 1170
899typedef struct SVGAOverlayUnit { 1171typedef
1172#include "vmware_pack_begin.h"
1173struct SVGAOverlayUnit {
900 uint32 enabled; 1174 uint32 enabled;
901 uint32 flags; 1175 uint32 flags;
902 uint32 dataOffset; 1176 uint32 dataOffset;
@@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit {
916 uint32 pitches[3]; 1190 uint32 pitches[3];
917 uint32 dataGMRId; 1191 uint32 dataGMRId;
918 uint32 dstScreenId; 1192 uint32 dstScreenId;
919} SVGAOverlayUnit; 1193}
1194#include "vmware_pack_end.h"
1195SVGAOverlayUnit;
1196
1197
1198/*
1199 * Guest display topology
1200 *
1201 * XXX: This structure is not part of the SVGA device's interface, and
1202 * doesn't really belong here.
1203 */
1204#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
1205
1206typedef struct SVGADisplayTopology {
1207 uint16 displayId;
1208 uint16 isPrimary;
1209 uint32 width;
1210 uint32 height;
1211 uint32 positionX;
1212 uint32 positionY;
1213} SVGADisplayTopology;
920 1214
921 1215
922/* 1216/*
@@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit {
951 * value of zero means no cloning should happen. 1245 * value of zero means no cloning should happen.
952 */ 1246 */
953 1247
954#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */ 1248#define SVGA_SCREEN_MUST_BE_SET (1 << 0)
955#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */ 1249#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
956#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */ 1250#define SVGA_SCREEN_IS_PRIMARY (1 << 1)
957#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */ 1251#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
958 1252
959/* 1253/*
960 * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is 1254 * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
@@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit {
977#define SVGA_SCREEN_BLANKING (1 << 4) 1271#define SVGA_SCREEN_BLANKING (1 << 4)
978 1272
979typedef 1273typedef
980struct SVGAScreenObject { 1274#include "vmware_pack_begin.h"
1275struct {
981 uint32 structSize; /* sizeof(SVGAScreenObject) */ 1276 uint32 structSize; /* sizeof(SVGAScreenObject) */
982 uint32 id; 1277 uint32 id;
983 uint32 flags; 1278 uint32 flags;
@@ -995,8 +1290,17 @@ struct SVGAScreenObject {
995 * with SVGA_FIFO_CAP_SCREEN_OBJECT. 1290 * with SVGA_FIFO_CAP_SCREEN_OBJECT.
996 */ 1291 */
997 SVGAGuestImage backingStore; 1292 SVGAGuestImage backingStore;
1293
1294 /*
1295 * The cloneCount field is treated as a hint from the guest that
1296 * the user wants this display to be cloned, cloneCount times.
1297 *
1298 * A value of zero means no cloning should happen.
1299 */
998 uint32 cloneCount; 1300 uint32 cloneCount;
999} SVGAScreenObject; 1301}
1302#include "vmware_pack_end.h"
1303SVGAScreenObject;
1000 1304
1001 1305
1002/* 1306/*
@@ -1009,7 +1313,7 @@ struct SVGAScreenObject {
1009 * Note the holes in the command ID numbers: These commands have been 1313 * Note the holes in the command ID numbers: These commands have been
1010 * deprecated, and the old IDs must not be reused. 1314 * deprecated, and the old IDs must not be reused.
1011 * 1315 *
1012 * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D 1316 * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
1013 * protocol. 1317 * protocol.
1014 * 1318 *
1015 * Each command's parameters are described by the comments and 1319 * Each command's parameters are described by the comments and
@@ -1020,6 +1324,7 @@ typedef enum {
1020 SVGA_CMD_INVALID_CMD = 0, 1324 SVGA_CMD_INVALID_CMD = 0,
1021 SVGA_CMD_UPDATE = 1, 1325 SVGA_CMD_UPDATE = 1,
1022 SVGA_CMD_RECT_COPY = 3, 1326 SVGA_CMD_RECT_COPY = 3,
1327 SVGA_CMD_RECT_ROP_COPY = 14,
1023 SVGA_CMD_DEFINE_CURSOR = 19, 1328 SVGA_CMD_DEFINE_CURSOR = 19,
1024 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, 1329 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
1025 SVGA_CMD_UPDATE_VERBOSE = 25, 1330 SVGA_CMD_UPDATE_VERBOSE = 25,
@@ -1035,9 +1340,14 @@ typedef enum {
1035 SVGA_CMD_ANNOTATION_COPY = 40, 1340 SVGA_CMD_ANNOTATION_COPY = 40,
1036 SVGA_CMD_DEFINE_GMR2 = 41, 1341 SVGA_CMD_DEFINE_GMR2 = 41,
1037 SVGA_CMD_REMAP_GMR2 = 42, 1342 SVGA_CMD_REMAP_GMR2 = 42,
1343 SVGA_CMD_DEAD = 43,
1344 SVGA_CMD_DEAD_2 = 44,
1345 SVGA_CMD_NOP = 45,
1346 SVGA_CMD_NOP_ERROR = 46,
1038 SVGA_CMD_MAX 1347 SVGA_CMD_MAX
1039} SVGAFifoCmdId; 1348} SVGAFifoCmdId;
1040 1349
1350#define SVGA_CMD_MAX_DATASIZE (256 * 1024)
1041#define SVGA_CMD_MAX_ARGS 64 1351#define SVGA_CMD_MAX_ARGS 64
1042 1352
1043 1353
@@ -1070,12 +1380,15 @@ typedef enum {
1070 */ 1380 */
1071 1381
1072typedef 1382typedef
1073struct SVGAFifoCmdUpdate { 1383#include "vmware_pack_begin.h"
1384struct {
1074 uint32 x; 1385 uint32 x;
1075 uint32 y; 1386 uint32 y;
1076 uint32 width; 1387 uint32 width;
1077 uint32 height; 1388 uint32 height;
1078} SVGAFifoCmdUpdate; 1389}
1390#include "vmware_pack_end.h"
1391SVGAFifoCmdUpdate;
1079 1392
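
By way of example, a minimal sketch of emitting this command, where
vmw_fifo_reserve()/vmw_fifo_commit() stand in for whatever mechanism
reserves and submits contiguous FIFO space:

extern void *vmw_fifo_reserve(uint32 bytes);   /* hypothetical */
extern void vmw_fifo_commit(uint32 bytes);     /* hypothetical */

static void example_update_rect(uint32 x, uint32 y, uint32 w, uint32 h)
{
        struct {
                uint32 cmdId;
                SVGAFifoCmdUpdate body;
        } *cmd = vmw_fifo_reserve(sizeof(*cmd));

        cmd->cmdId       = SVGA_CMD_UPDATE;
        cmd->body.x      = x;
        cmd->body.y      = y;
        cmd->body.width  = w;
        cmd->body.height = h;
        vmw_fifo_commit(sizeof(*cmd));
}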
1080 1393
1081/* 1394/*
@@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate {
1089 */ 1402 */
1090 1403
1091typedef 1404typedef
1092struct SVGAFifoCmdRectCopy { 1405#include "vmware_pack_begin.h"
1406struct {
1407 uint32 srcX;
1408 uint32 srcY;
1409 uint32 destX;
1410 uint32 destY;
1411 uint32 width;
1412 uint32 height;
1413}
1414#include "vmware_pack_end.h"
1415SVGAFifoCmdRectCopy;
1416
1417
1418/*
1419 * SVGA_CMD_RECT_ROP_COPY --
1420 *
1421 * Perform a rectangular DMA transfer from one area of the GFB to
1422 * another, and copy the result to any screens which intersect it.
1423 * The value of ROP may only be SVGA_ROP_COPY, and this command is
1424 * only supported for backwards compatibility reasons.
1425 *
1426 * Availability:
1427 * SVGA_CAP_RECT_COPY
1428 */
1429
1430typedef
1431#include "vmware_pack_begin.h"
1432struct {
1093 uint32 srcX; 1433 uint32 srcX;
1094 uint32 srcY; 1434 uint32 srcY;
1095 uint32 destX; 1435 uint32 destX;
1096 uint32 destY; 1436 uint32 destY;
1097 uint32 width; 1437 uint32 width;
1098 uint32 height; 1438 uint32 height;
1099} SVGAFifoCmdRectCopy; 1439 uint32 rop;
1440}
1441#include "vmware_pack_end.h"
1442SVGAFifoCmdRectRopCopy;
1100 1443
1101 1444
1102/* 1445/*
@@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy {
1113 */ 1456 */
1114 1457
1115typedef 1458typedef
1116struct SVGAFifoCmdDefineCursor { 1459#include "vmware_pack_begin.h"
1460struct {
1117 uint32 id; /* Reserved, must be zero. */ 1461 uint32 id; /* Reserved, must be zero. */
1118 uint32 hotspotX; 1462 uint32 hotspotX;
1119 uint32 hotspotY; 1463 uint32 hotspotY;
@@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor {
1125 * Followed by scanline data for AND mask, then XOR mask. 1469 * Followed by scanline data for AND mask, then XOR mask.
1126 * Each scanline is padded to a 32-bit boundary. 1470 * Each scanline is padded to a 32-bit boundary.
1127 */ 1471 */
1128} SVGAFifoCmdDefineCursor; 1472}
1473#include "vmware_pack_end.h"
1474SVGAFifoCmdDefineCursor;
1129 1475
1130 1476
1131/* 1477/*
@@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor {
1142 */ 1488 */
1143 1489
1144typedef 1490typedef
1145struct SVGAFifoCmdDefineAlphaCursor { 1491#include "vmware_pack_begin.h"
1492struct {
1146 uint32 id; /* Reserved, must be zero. */ 1493 uint32 id; /* Reserved, must be zero. */
1147 uint32 hotspotX; 1494 uint32 hotspotX;
1148 uint32 hotspotY; 1495 uint32 hotspotY;
1149 uint32 width; 1496 uint32 width;
1150 uint32 height; 1497 uint32 height;
1151 /* Followed by scanline data */ 1498 /* Followed by scanline data */
1152} SVGAFifoCmdDefineAlphaCursor; 1499}
1500#include "vmware_pack_end.h"
1501SVGAFifoCmdDefineAlphaCursor;
1153 1502
1154 1503
1155/* 1504/*
@@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor {
1165 */ 1514 */
1166 1515
1167typedef 1516typedef
1168struct SVGAFifoCmdUpdateVerbose { 1517#include "vmware_pack_begin.h"
1518struct {
1169 uint32 x; 1519 uint32 x;
1170 uint32 y; 1520 uint32 y;
1171 uint32 width; 1521 uint32 width;
1172 uint32 height; 1522 uint32 height;
1173 uint32 reason; 1523 uint32 reason;
1174} SVGAFifoCmdUpdateVerbose; 1524}
1525#include "vmware_pack_end.h"
1526SVGAFifoCmdUpdateVerbose;
1175 1527
1176 1528
1177/* 1529/*
@@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose {
1190#define SVGA_ROP_COPY 0x03 1542#define SVGA_ROP_COPY 0x03
1191 1543
1192typedef 1544typedef
1193struct SVGAFifoCmdFrontRopFill { 1545#include "vmware_pack_begin.h"
1546struct {
1194 uint32 color; /* In the same format as the GFB */ 1547 uint32 color; /* In the same format as the GFB */
1195 uint32 x; 1548 uint32 x;
1196 uint32 y; 1549 uint32 y;
1197 uint32 width; 1550 uint32 width;
1198 uint32 height; 1551 uint32 height;
1199 uint32 rop; /* Must be SVGA_ROP_COPY */ 1552 uint32 rop; /* Must be SVGA_ROP_COPY */
1200} SVGAFifoCmdFrontRopFill; 1553}
1554#include "vmware_pack_end.h"
1555SVGAFifoCmdFrontRopFill;
1201 1556
1202 1557
1203/* 1558/*
@@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill {
1216 */ 1571 */
1217 1572
1218typedef 1573typedef
1574#include "vmware_pack_begin.h"
1219struct { 1575struct {
1220 uint32 fence; 1576 uint32 fence;
1221} SVGAFifoCmdFence; 1577}
1578#include "vmware_pack_end.h"
1579SVGAFifoCmdFence;
1222 1580
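
A matching sketch for the fence command, reusing the hypothetical FIFO
helpers from the SVGA_CMD_UPDATE example; SVGA_CMD_FENCE is defined with
the other FIFO command ids. If SVGA_IRQFLAG_ANY_FENCE is unmasked, the
device raises an IRQ once the fence is passed:

static void example_insert_fence(uint32 seqno)
{
        struct {
                uint32 cmdId;
                SVGAFifoCmdFence body;
        } *cmd = vmw_fifo_reserve(sizeof(*cmd));

        cmd->cmdId      = SVGA_CMD_FENCE;
        cmd->body.fence = seqno;
        vmw_fifo_commit(sizeof(*cmd));
}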
1223 1581
1224/* 1582/*
@@ -1233,11 +1591,14 @@ struct {
1233 */ 1591 */
1234 1592
1235typedef 1593typedef
1236struct SVGAFifoCmdEscape { 1594#include "vmware_pack_begin.h"
1595struct {
1237 uint32 nsid; 1596 uint32 nsid;
1238 uint32 size; 1597 uint32 size;
1239 /* followed by 'size' bytes of data */ 1598 /* followed by 'size' bytes of data */
1240} SVGAFifoCmdEscape; 1599}
1600#include "vmware_pack_end.h"
1601SVGAFifoCmdEscape;
1241 1602
1242 1603
1243/* 1604/*
@@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape {
1267 */ 1628 */
1268 1629
1269typedef 1630typedef
1631#include "vmware_pack_begin.h"
1270struct { 1632struct {
1271 SVGAScreenObject screen; /* Variable-length according to version */ 1633 SVGAScreenObject screen; /* Variable-length according to version */
1272} SVGAFifoCmdDefineScreen; 1634}
1635#include "vmware_pack_end.h"
1636SVGAFifoCmdDefineScreen;
1273 1637
1274 1638
1275/* 1639/*
@@ -1283,9 +1647,12 @@ struct {
1283 */ 1647 */
1284 1648
1285typedef 1649typedef
1650#include "vmware_pack_begin.h"
1286struct { 1651struct {
1287 uint32 screenId; 1652 uint32 screenId;
1288} SVGAFifoCmdDestroyScreen; 1653}
1654#include "vmware_pack_end.h"
1655SVGAFifoCmdDestroyScreen;
1289 1656
1290 1657
1291/* 1658/*
@@ -1336,11 +1703,14 @@ struct {
1336 */ 1703 */
1337 1704
1338typedef 1705typedef
1706#include "vmware_pack_begin.h"
1339struct { 1707struct {
1340 SVGAGuestPtr ptr; 1708 SVGAGuestPtr ptr;
1341 uint32 bytesPerLine; 1709 uint32 bytesPerLine;
1342 SVGAGMRImageFormat format; 1710 SVGAGMRImageFormat format;
1343} SVGAFifoCmdDefineGMRFB; 1711}
1712#include "vmware_pack_end.h"
1713SVGAFifoCmdDefineGMRFB;
1344 1714
1345 1715
1346/* 1716/*
@@ -1348,19 +1718,10 @@ struct {
1348 * 1718 *
1349 * This is a guest-to-host blit. It performs a DMA operation to 1719 * This is a guest-to-host blit. It performs a DMA operation to
1350 * copy a rectangular region of pixels from the current GMRFB to 1720 * copy a rectangular region of pixels from the current GMRFB to
1351 * one or more Screen Objects. 1721 * a ScreenObject.
1352 * 1722 *
1353 * The destination coordinate may be specified relative to a 1723 * The destination coordinate may be specified relative to a
1354 * screen's origin (if a screen ID is specified) or relative to the 1724 * screen's origin. The provided screen ID must be valid.
1355 * virtual coordinate system's origin (if the screen ID is
1356 * SVGA_ID_INVALID). The actual destination may span zero or more
1357 * screens, in the case of a virtual destination rect or a rect
1358 * which extends off the edge of the specified screen.
1359 *
1360 * This command writes to the screen's "base layer": the underlying
1361 * framebuffer which exists below any cursor or video overlays. No
1362 * action is necessary to explicitly hide or update any overlays
1363 * which exist on top of the updated region.
1364 * 1725 *
1365 * The SVGA device is guaranteed to finish reading from the GMRFB 1726 * The SVGA device is guaranteed to finish reading from the GMRFB
1366 * by the time any subsequent FENCE commands are reached. 1727 * by the time any subsequent FENCE commands are reached.
@@ -1373,46 +1734,27 @@ struct {
1373 */ 1734 */
1374 1735
1375typedef 1736typedef
1737#include "vmware_pack_begin.h"
1376struct { 1738struct {
1377 SVGASignedPoint srcOrigin; 1739 SVGASignedPoint srcOrigin;
1378 SVGASignedRect destRect; 1740 SVGASignedRect destRect;
1379 uint32 destScreenId; 1741 uint32 destScreenId;
1380} SVGAFifoCmdBlitGMRFBToScreen; 1742}
1743#include "vmware_pack_end.h"
1744SVGAFifoCmdBlitGMRFBToScreen;
1381 1745
1382 1746
1383/* 1747/*
1384 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB -- 1748 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
1385 * 1749 *
1386 * This is a host-to-guest blit. It performs a DMA operation to 1750 * This is a host-to-guest blit. It performs a DMA operation to
1387 * copy a rectangular region of pixels from a single Screen Object 1751 * copy a rectangular region of pixels from a single ScreenObject
1388 * back to the current GMRFB. 1752 * back to the current GMRFB.
1389 * 1753 *
1390 * Usage note: This command should be used rarely. It will
1391 * typically be inefficient, but it is necessary for some types of
1392 * synchronization between 3D (GPU) and 2D (CPU) rendering into
1393 * overlapping areas of a screen.
1394 *
1395 * The source coordinate is specified relative to a screen's 1754 * The source coordinate is specified relative to a screen's
1396 * origin. The provided screen ID must be valid. If any parameters 1755 * origin. The provided screen ID must be valid. If any parameters
1397 * are invalid, the resulting pixel values are undefined. 1756 * are invalid, the resulting pixel values are undefined.
1398 * 1757 *
1399 * This command reads the screen's "base layer". Overlays like
1400 * video and cursor are not included, but any data which was sent
1401 * using a blit-to-screen primitive will be available, no matter
1402 * whether the data's original source was the GMRFB or the 3D
1403 * acceleration hardware.
1404 *
1405 * Note that our guest-to-host blits and host-to-guest blits aren't
1406 * symmetric in their current implementation. While the parameters
1407 * are identical, host-to-guest blits are a lot less featureful.
1408 * They do not support clipping: If the source parameters don't
1409 * fully fit within a screen, the blit fails. They must originate
1410 * from exactly one screen. Virtual coordinates are not directly
1411 * supported.
1412 *
1413 * Host-to-guest blits do support the same set of GMRFB formats
1414 * offered by guest-to-host blits.
1415 *
1416 * The SVGA device is guaranteed to finish writing to the GMRFB by 1758 * The SVGA device is guaranteed to finish writing to the GMRFB by
1417 * the time any subsequent FENCE commands are reached. 1759 * the time any subsequent FENCE commands are reached.
1418 * 1760 *
@@ -1421,77 +1763,57 @@ struct {
1421 */ 1763 */
1422 1764
1423typedef 1765typedef
1766#include "vmware_pack_begin.h"
1424struct { 1767struct {
1425 SVGASignedPoint destOrigin; 1768 SVGASignedPoint destOrigin;
1426 SVGASignedRect srcRect; 1769 SVGASignedRect srcRect;
1427 uint32 srcScreenId; 1770 uint32 srcScreenId;
1428} SVGAFifoCmdBlitScreenToGMRFB; 1771}
1772#include "vmware_pack_end.h"
1773SVGAFifoCmdBlitScreenToGMRFB;
1429 1774
1430 1775
1431/* 1776/*
1432 * SVGA_CMD_ANNOTATION_FILL -- 1777 * SVGA_CMD_ANNOTATION_FILL --
1433 * 1778 *
1434 * This is a blit annotation. This command stores a small piece of 1779 * The annotation commands have been deprecated and should not be used
1435 * device state which is consumed by the next blit-to-screen 1780 * by new drivers. They used to provide performance hints to the SVGA
1436 * command. The state is only cleared by commands which are 1781 * device about the content of screen updates, but newer SVGA devices
1437 * specifically documented as consuming an annotation. Other 1782 * ignore these.
1438 * commands (such as ESCAPEs for debugging) may intervene between
1439 * the annotation and its associated blit.
1440 *
1441 * This annotation is a promise about the contents of the next
1442 * blit: The video driver is guaranteeing that all pixels in that
1443 * blit will have the same value, specified here as a color in
1444 * SVGAColorBGRX format.
1445 *
1446 * The SVGA device can still render the blit correctly even if it
1447 * ignores this annotation, but the annotation may allow it to
1448 * perform the blit more efficiently, for example by ignoring the
1449 * source data and performing a fill in hardware.
1450 *
1451 * This annotation is most important for performance when the
1452 * user's display is being remoted over a network connection.
1453 * 1783 *
1454 * Availability: 1784 * Availability:
1455 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 1785 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
1456 */ 1786 */
1457 1787
1458typedef 1788typedef
1789#include "vmware_pack_begin.h"
1459struct { 1790struct {
1460 SVGAColorBGRX color; 1791 SVGAColorBGRX color;
1461} SVGAFifoCmdAnnotationFill; 1792}
1793#include "vmware_pack_end.h"
1794SVGAFifoCmdAnnotationFill;
1462 1795
1463 1796
1464/* 1797/*
1465 * SVGA_CMD_ANNOTATION_COPY -- 1798 * SVGA_CMD_ANNOTATION_COPY --
1466 * 1799 *
1467 * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more 1800 * The annotation commands have been deprecated and should not be used
1468 * information about annotations. 1801 * by new drivers. They used to provide performance hints to the SVGA
1469 * 1802 * device about the content of screen updates, but newer SVGA devices
1470 * This annotation is a promise about the contents of the next 1803 * ignore these.
1471 * blit: The video driver is guaranteeing that all pixels in that
1472 * blit will have the same value as those which already exist at an
1473 * identically-sized region on the same or a different screen.
1474 *
1475 * Note that the source pixels for the COPY in this annotation are
1476 * sampled before applying the annotation's associated blit. They
1477 * are allowed to overlap with the blit's destination pixels.
1478 *
1479 * The copy source rectangle is specified the same way as the blit
1480 * destination: it can be a rectangle which spans zero or more
1481 * screens, specified relative to either a screen or to the virtual
1482 * coordinate system's origin. If the source rectangle includes
1483 * pixels which are not from exactly one screen, the results are
1484 * undefined.
1485 * 1804 *
1486 * Availability: 1805 * Availability:
1487 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 1806 * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
1488 */ 1807 */
1489 1808
1490typedef 1809typedef
1810#include "vmware_pack_begin.h"
1491struct { 1811struct {
1492 SVGASignedPoint srcOrigin; 1812 SVGASignedPoint srcOrigin;
1493 uint32 srcScreenId; 1813 uint32 srcScreenId;
1494} SVGAFifoCmdAnnotationCopy; 1814}
1815#include "vmware_pack_end.h"
1816SVGAFifoCmdAnnotationCopy;
1495 1817
1496 1818
1497/* 1819/*
@@ -1504,10 +1826,13 @@ struct {
1504 */ 1826 */
1505 1827
1506typedef 1828typedef
1829#include "vmware_pack_begin.h"
1507struct { 1830struct {
1508 uint32 gmrId; 1831 uint32 gmrId;
1509 uint32 numPages; 1832 uint32 numPages;
1510} SVGAFifoCmdDefineGMR2; 1833}
1834#include "vmware_pack_end.h"
1835SVGAFifoCmdDefineGMR2;
1511 1836
1512 1837
1513/* 1838/*
@@ -1546,6 +1871,7 @@ typedef enum {
1546} SVGARemapGMR2Flags; 1871} SVGARemapGMR2Flags;
1547 1872
1548typedef 1873typedef
1874#include "vmware_pack_begin.h"
1549struct { 1875struct {
1550 uint32 gmrId; 1876 uint32 gmrId;
1551 SVGARemapGMR2Flags flags; 1877 SVGARemapGMR2Flags flags;
@@ -1559,6 +1885,52 @@ struct {
1559 * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag 1885 * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
1560 * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry. 1886 * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
1561 */ 1887 */
1562} SVGAFifoCmdRemapGMR2; 1888}
1889#include "vmware_pack_end.h"
1890SVGAFifoCmdRemapGMR2;
1891
1892
1893/*
1894 * Size of SVGA device memory such as frame buffer and FIFO.
1895 */
1896#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */
1897#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024)
1898#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
1899#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
1900#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
1901#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
1902#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
1903#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
1904
1905#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
1906
1907/*
1908 * To simplify autoDetect display configuration, support a minimum of
1909 * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
1910 * numDisplays = 2
1911 * maxWidth = numDisplays * 1920 = 3840
1912 * maxHeight = rotated width of single monitor = 1920
1913 * vramSize = maxWidth * maxHeight * 4 = 29491200
1914 */
1915#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
1916
1917#if defined(VMX86_SERVER)
1918#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
1919#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
1920#define SVGA_FIFO_SIZE (256 * 1024)
1921#define SVGA_FIFO_SIZE_3D (516 * 1024)
1922#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024)
1923#define SVGA_AUTODETECT_DEFAULT FALSE
1924#else
1925#define SVGA_VRAM_SIZE (16 * 1024 * 1024)
1926#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE
1927#define SVGA_FIFO_SIZE (2 * 1024 * 1024)
1928#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE
1929#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024)
1930#define SVGA_AUTODETECT_DEFAULT TRUE
1931#endif
1932
1933#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
1934#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
1563 1935
1564#endif 1936#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 000000000000..2e8ba4df8de9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25#ifndef _VM_BASIC_TYPES_H_
26#define _VM_BASIC_TYPES_H_
27#include <linux/kernel.h>
28
29typedef u32 uint32;
30typedef s32 int32;
31typedef u64 uint64;
32typedef u16 uint16;
33typedef s16 int16;
34typedef u8 uint8;
35typedef s8 int8;
36
37typedef uint64 PA;
38typedef uint32 PPN;
39typedef uint64 PPN64;
40
41typedef bool Bool;
42
43#define MAX_UINT32 U32_MAX
44#define MAX_UINT16 U16_MAX
45
46#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 000000000000..120eab830eaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
1#ifndef _VM_BASIC_TYPES_H_
2#define _VM_BASIC_TYPES_H_
3#include <linux/kernel.h>
4
5typedef u32 uint32;
6typedef s32 int32;
7typedef u64 uint64;
8typedef u16 uint16;
9typedef s16 int16;
10typedef u8 uint8;
11typedef s8 int8;
12
13typedef uint64 PA;
14typedef uint32 PPN;
15typedef uint64 PPN64;
16
17typedef bool Bool;
18
19#define MAX_UINT32 U32_MAX
20
21#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 000000000000..7e7b0ce34aa2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 000000000000..e2e440ed3d44
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
1/**********************************************************
2 * Copyright 2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25__packed
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7dd15c5..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3D hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#include "svga_reg.h"
36
37typedef uint32 PPN;
38typedef __le64 PPN64;
39
40/*
41 * 3D Hardware Version
42 *
43 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
44 * register. It is set by the host and read by the guest. This lets
45 * us make new guest drivers which are backwards-compatible with old
46 * SVGA hardware revisions. It does not let us support old guest
47 * drivers. Good enough for now.
48 *
49 */
50
51#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
52#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
53#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
54
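/*
 * For example, the encoding round-trips as expected:
 * SVGA3D_MAKE_HWVERSION(2, 1) == 0x20001, and in the other direction
 * SVGA3D_MAJOR_HWVERSION(0x20001) == 2 and
 * SVGA3D_MINOR_HWVERSION(0x20001) == 1.
 */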
55typedef enum {
56 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
57 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
58 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
59 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
60 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
61 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
62 SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
63 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
64} SVGA3dHardwareVersion;
65
66/*
67 * Generic Types
68 */
69
70typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
71#define SVGA3D_NUM_CLIPPLANES 6
72#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
73#define SVGA3D_MAX_CONTEXT_IDS 256
74#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
75
76#define SVGA3D_NUM_TEXTURE_UNITS 32
77#define SVGA3D_NUM_LIGHTS 8
78
79/*
80 * Surface formats.
81 *
82 * If you modify this list, be sure to keep GLUtil.c in sync. It
83 * includes the internal format definition of each surface in
84 * GLUtil_ConvertSurfaceFormat, and it contains a table of
85 * human-readable names in GLUtil_GetFormatName.
86 */
87
88typedef enum SVGA3dSurfaceFormat {
89 SVGA3D_FORMAT_MIN = 0,
90 SVGA3D_FORMAT_INVALID = 0,
91
92 SVGA3D_X8R8G8B8 = 1,
93 SVGA3D_A8R8G8B8 = 2,
94
95 SVGA3D_R5G6B5 = 3,
96 SVGA3D_X1R5G5B5 = 4,
97 SVGA3D_A1R5G5B5 = 5,
98 SVGA3D_A4R4G4B4 = 6,
99
100 SVGA3D_Z_D32 = 7,
101 SVGA3D_Z_D16 = 8,
102 SVGA3D_Z_D24S8 = 9,
103 SVGA3D_Z_D15S1 = 10,
104
105 SVGA3D_LUMINANCE8 = 11,
106 SVGA3D_LUMINANCE4_ALPHA4 = 12,
107 SVGA3D_LUMINANCE16 = 13,
108 SVGA3D_LUMINANCE8_ALPHA8 = 14,
109
110 SVGA3D_DXT1 = 15,
111 SVGA3D_DXT2 = 16,
112 SVGA3D_DXT3 = 17,
113 SVGA3D_DXT4 = 18,
114 SVGA3D_DXT5 = 19,
115
116 SVGA3D_BUMPU8V8 = 20,
117 SVGA3D_BUMPL6V5U5 = 21,
118 SVGA3D_BUMPX8L8V8U8 = 22,
119 SVGA3D_BUMPL8V8U8 = 23,
120
121 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
122 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
123
124 SVGA3D_A2R10G10B10 = 26,
125
126 /* signed formats */
127 SVGA3D_V8U8 = 27,
128 SVGA3D_Q8W8V8U8 = 28,
129 SVGA3D_CxV8U8 = 29,
130
131 /* mixed formats */
132 SVGA3D_X8L8V8U8 = 30,
133 SVGA3D_A2W10V10U10 = 31,
134
135 SVGA3D_ALPHA8 = 32,
136
137 /* Single- and dual-component floating point formats */
138 SVGA3D_R_S10E5 = 33,
139 SVGA3D_R_S23E8 = 34,
140 SVGA3D_RG_S10E5 = 35,
141 SVGA3D_RG_S23E8 = 36,
142
143 SVGA3D_BUFFER = 37,
144
145 SVGA3D_Z_D24X8 = 38,
146
147 SVGA3D_V16U16 = 39,
148
149 SVGA3D_G16R16 = 40,
150 SVGA3D_A16B16G16R16 = 41,
151
152 /* Packed Video formats */
153 SVGA3D_UYVY = 42,
154 SVGA3D_YUY2 = 43,
155
156 /* Planar video formats */
157 SVGA3D_NV12 = 44,
158
159 /* Video format with alpha */
160 SVGA3D_AYUV = 45,
161
162 SVGA3D_R32G32B32A32_TYPELESS = 46,
163 SVGA3D_R32G32B32A32_FLOAT = 25,
164 SVGA3D_R32G32B32A32_UINT = 47,
165 SVGA3D_R32G32B32A32_SINT = 48,
166 SVGA3D_R32G32B32_TYPELESS = 49,
167 SVGA3D_R32G32B32_FLOAT = 50,
168 SVGA3D_R32G32B32_UINT = 51,
169 SVGA3D_R32G32B32_SINT = 52,
170 SVGA3D_R16G16B16A16_TYPELESS = 53,
171 SVGA3D_R16G16B16A16_FLOAT = 24,
172 SVGA3D_R16G16B16A16_UNORM = 41,
173 SVGA3D_R16G16B16A16_UINT = 54,
174 SVGA3D_R16G16B16A16_SNORM = 55,
175 SVGA3D_R16G16B16A16_SINT = 56,
176 SVGA3D_R32G32_TYPELESS = 57,
177 SVGA3D_R32G32_FLOAT = 36,
178 SVGA3D_R32G32_UINT = 58,
179 SVGA3D_R32G32_SINT = 59,
180 SVGA3D_R32G8X24_TYPELESS = 60,
181 SVGA3D_D32_FLOAT_S8X24_UINT = 61,
182 SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
183 SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
184 SVGA3D_R10G10B10A2_TYPELESS = 64,
185 SVGA3D_R10G10B10A2_UNORM = 26,
186 SVGA3D_R10G10B10A2_UINT = 65,
187 SVGA3D_R11G11B10_FLOAT = 66,
188 SVGA3D_R8G8B8A8_TYPELESS = 67,
189 SVGA3D_R8G8B8A8_UNORM = 68,
190 SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
191 SVGA3D_R8G8B8A8_UINT = 70,
192 SVGA3D_R8G8B8A8_SNORM = 28,
193 SVGA3D_R8G8B8A8_SINT = 71,
194 SVGA3D_R16G16_TYPELESS = 72,
195 SVGA3D_R16G16_FLOAT = 35,
196 SVGA3D_R16G16_UNORM = 40,
197 SVGA3D_R16G16_UINT = 73,
198 SVGA3D_R16G16_SNORM = 39,
199 SVGA3D_R16G16_SINT = 74,
200 SVGA3D_R32_TYPELESS = 75,
201 SVGA3D_D32_FLOAT = 76,
202 SVGA3D_R32_FLOAT = 34,
203 SVGA3D_R32_UINT = 77,
204 SVGA3D_R32_SINT = 78,
205 SVGA3D_R24G8_TYPELESS = 79,
206 SVGA3D_D24_UNORM_S8_UINT = 80,
207 SVGA3D_R24_UNORM_X8_TYPELESS = 81,
208 SVGA3D_X24_TYPELESS_G8_UINT = 82,
209 SVGA3D_R8G8_TYPELESS = 83,
210 SVGA3D_R8G8_UNORM = 84,
211 SVGA3D_R8G8_UINT = 85,
212 SVGA3D_R8G8_SNORM = 27,
213 SVGA3D_R8G8_SINT = 86,
214 SVGA3D_R16_TYPELESS = 87,
215 SVGA3D_R16_FLOAT = 33,
216 SVGA3D_D16_UNORM = 8,
217 SVGA3D_R16_UNORM = 88,
218 SVGA3D_R16_UINT = 89,
219 SVGA3D_R16_SNORM = 90,
220 SVGA3D_R16_SINT = 91,
221 SVGA3D_R8_TYPELESS = 92,
222 SVGA3D_R8_UNORM = 93,
223 SVGA3D_R8_UINT = 94,
224 SVGA3D_R8_SNORM = 95,
225 SVGA3D_R8_SINT = 96,
226 SVGA3D_A8_UNORM = 32,
227 SVGA3D_R1_UNORM = 97,
228 SVGA3D_R9G9B9E5_SHAREDEXP = 98,
229 SVGA3D_R8G8_B8G8_UNORM = 99,
230 SVGA3D_G8R8_G8B8_UNORM = 100,
231 SVGA3D_BC1_TYPELESS = 101,
232 SVGA3D_BC1_UNORM = 15,
233 SVGA3D_BC1_UNORM_SRGB = 102,
234 SVGA3D_BC2_TYPELESS = 103,
235 SVGA3D_BC2_UNORM = 17,
236 SVGA3D_BC2_UNORM_SRGB = 104,
237 SVGA3D_BC3_TYPELESS = 105,
238 SVGA3D_BC3_UNORM = 19,
239 SVGA3D_BC3_UNORM_SRGB = 106,
240 SVGA3D_BC4_TYPELESS = 107,
241 SVGA3D_BC4_UNORM = 108,
242 SVGA3D_BC4_SNORM = 109,
243 SVGA3D_BC5_TYPELESS = 110,
244 SVGA3D_BC5_UNORM = 111,
245 SVGA3D_BC5_SNORM = 112,
246 SVGA3D_B5G6R5_UNORM = 3,
247 SVGA3D_B5G5R5A1_UNORM = 5,
248 SVGA3D_B8G8R8A8_UNORM = 2,
249 SVGA3D_B8G8R8X8_UNORM = 1,
250 SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
251 SVGA3D_B8G8R8A8_TYPELESS = 114,
252 SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
253 SVGA3D_B8G8R8X8_TYPELESS = 116,
254 SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
255
256 /* Advanced D3D9 depth formats. */
257 SVGA3D_Z_DF16 = 118,
258 SVGA3D_Z_DF24 = 119,
259 SVGA3D_Z_D24S8_INT = 120,
260
261 /* Planar video formats. */
262 SVGA3D_YV12 = 121,
263
264 SVGA3D_FORMAT_MAX = 122,
265} SVGA3dSurfaceFormat;
266
267typedef uint32 SVGA3dColor; /* a, r, g, b */
268
269/*
270 * These match the D3DFORMAT_OP definitions used by Direct3D. We need
271 * them so that we can query the host for what the supported surface
272 * operations are (when we're using the D3D backend, in particular),
273 * and so we can send those operations to the guest.
274 */
275typedef enum {
276 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
277 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
278 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
279 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
280 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
281 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
282 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
283
284/*
285 * This format can be used as a render target if the current display mode
286 * is the same depth when the alpha channel is ignored. E.g. if the device
287 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
288 * format op list entry for A8R8G8B8 should have this cap.
289 */
290 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
291
292/*
293 * This format contains DirectDraw support (including Flip). This flag
294 * should not be set on alpha formats.
295 */
296 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
297
298/*
299 * The rasterizer can provide some level of Direct3D support in this format,
300 * which implies that the driver can create a context in this mode (for some
301 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
302 * flag must also be set.
303 */
304 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
305
306/*
307 * This is set for a private format when the driver has put the bpp in
308 * the structure.
309 */
310 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
311
312/*
313 * Indicates that this format can be converted to any RGB format for which
314 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
315 */
316 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
317
318/*
319 * Indicates that this format can be used to create offscreen plain surfaces.
320 */
321 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
322
323/*
324 * Indicates that this format can be read as an SRGB texture (meaning that the
325 * sampler will linearize the looked up data)
326 */
327 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
328
329/*
330 * Indicates that this format can be used in the bumpmap instructions
331 */
332 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
333
334/*
335 * Indicates that this format can be sampled by the displacement map sampler
336 */
337 SVGA3DFORMAT_OP_DMAP = 0x00020000,
338
339/*
340 * Indicates that this format cannot be used with texture filtering
341 */
342 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
343
344/*
345 * Indicates that format conversions are supported to this RGB format if
346 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
347 */
348 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
349
350/*
351 * Indicates that this format can be written as an SRGB target (meaning that the
352 * pixel pipe will DE-linearize data on output to the format)
353 */
354 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
355
356/*
357 * Indicates that this format cannot be used with alpha blending
358 */
359 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
360
361/*
362 * Indicates that the device can auto-generate sublevels for resources
363 * of this format
364 */
365 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
366
367/*
369 * Indicates that this format can be used by the vertex texture sampler
369 */
370 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
371
372/*
373 * Indicates that this format supports neither texture coordinate wrap
374 * modes, nor mipmapping
375 */
376 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
377} SVGA3dFormatOp;
378
379/*
380 * This structure is a conversion of SVGA3DFORMAT_OP_*.
381 * Entries must be located at the same position.
382 */
383typedef union {
384 uint32 value;
385 struct {
386 uint32 texture : 1;
387 uint32 volumeTexture : 1;
388 uint32 cubeTexture : 1;
389 uint32 offscreenRenderTarget : 1;
390 uint32 sameFormatRenderTarget : 1;
391 uint32 unknown1 : 1;
392 uint32 zStencil : 1;
393 uint32 zStencilArbitraryDepth : 1;
394 uint32 sameFormatUpToAlpha : 1;
395 uint32 unknown2 : 1;
396 uint32 displayMode : 1;
397 uint32 acceleration3d : 1;
398 uint32 pixelSize : 1;
399 uint32 convertToARGB : 1;
400 uint32 offscreenPlain : 1;
401 uint32 sRGBRead : 1;
402 uint32 bumpMap : 1;
403 uint32 dmap : 1;
404 uint32 noFilter : 1;
405 uint32 memberOfGroupARGB : 1;
406 uint32 sRGBWrite : 1;
407 uint32 noAlphaBlend : 1;
408 uint32 autoGenMipMap : 1;
409 uint32 vertexTexture : 1;
410 uint32 noTexCoordWrapNorMip : 1;
411 };
412} SVGA3dSurfaceFormatCaps;
413
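
Because the bitfield mirrors the SVGA3DFORMAT_OP_* bits one-for-one, a
capability can be tested either way; a small illustration, assuming the
usual LSB-first bitfield layout:

SVGA3dSurfaceFormatCaps caps = {
        .value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE,
};

/*
 * Equivalent tests, by construction:
 *   (caps.value & SVGA3DFORMAT_OP_CUBETEXTURE) != 0
 *   caps.cubeTexture == 1
 */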
414/*
415 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
416 * must fit in a uint32.
417 */
418
419typedef enum {
420 SVGA3D_RS_INVALID = 0,
421 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
422 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
423 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
424 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
425 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
426 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
427 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
428 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
429 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
430 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
431 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
432 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
433 SVGA3D_RS_STENCILREF = 13, /* uint32 */
434 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
435 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
436 SVGA3D_RS_FOGSTART = 16, /* float */
437 SVGA3D_RS_FOGEND = 17, /* float */
438 SVGA3D_RS_FOGDENSITY = 18, /* float */
439 SVGA3D_RS_POINTSIZE = 19, /* float */
440 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
441 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
442 SVGA3D_RS_POINTSCALE_A = 22, /* float */
443 SVGA3D_RS_POINTSCALE_B = 23, /* float */
444 SVGA3D_RS_POINTSCALE_C = 24, /* float */
445 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
446 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
447 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
448 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
449 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
450 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
451 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
452 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
453 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
454 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
455 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
456 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
457 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
458 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
459 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
460 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
461 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
462 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
463 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
464 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
465 SVGA3D_RS_ZBIAS = 45, /* float */
466 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
467 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
468 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
469 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
470 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
471 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
472 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
473 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
474 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
475 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
476 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
477 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
478 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
479 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
480 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
481 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
482 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
483 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
484 SVGA3D_RS_DEPTHBIAS = 64, /* float */
485
486
487 /*
488 * Output Gamma Level
489 *
490 * Output gamma affects the gamma curve of colors that are output from the
491 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
492 * value is <= 0.0, gamma correction is ignored and linear color space is
493 * used.
494 */
495
496 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
497 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
498 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
499 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
500 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
501 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
502 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
503 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
504 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
505 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
506 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
507 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
508 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
509 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
510 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
511 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
512 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
513 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
514 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
515 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
516 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
517 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
518 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
519 SVGA3D_RS_TWEENFACTOR = 88, /* float */
520 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
521 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
522 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
523 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
524 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
525 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
526 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
527 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
528 SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
529 SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
530 SVGA3D_RS_LINEWIDTH = 99, /* float */
531 SVGA3D_RS_MAX
532} SVGA3dRenderStateName;
533
534typedef enum {
535 SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
536 SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
537 SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
538 SVGA3D_TRANSPARENCYANTIALIAS_MAX
539} SVGA3dTransparencyAntialiasType;
540
541typedef enum {
542 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
543 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
544 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
545} SVGA3dVertexMaterial;
546
547typedef enum {
548 SVGA3D_FILLMODE_INVALID = 0,
549 SVGA3D_FILLMODE_POINT = 1,
550 SVGA3D_FILLMODE_LINE = 2,
551 SVGA3D_FILLMODE_FILL = 3,
552 SVGA3D_FILLMODE_MAX
553} SVGA3dFillModeType;
554
555
556typedef
557union {
558 struct {
559 uint16 mode; /* SVGA3dFillModeType */
560 uint16 face; /* SVGA3dFace */
561 };
562 uint32 uintValue;
563} SVGA3dFillMode;
564
565typedef enum {
566 SVGA3D_SHADEMODE_INVALID = 0,
567 SVGA3D_SHADEMODE_FLAT = 1,
568 SVGA3D_SHADEMODE_SMOOTH = 2,
569 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
570 SVGA3D_SHADEMODE_MAX
571} SVGA3dShadeMode;
572
573typedef
574union {
575 struct {
576 uint16 repeat;
577 uint16 pattern;
578 };
579 uint32 uintValue;
580} SVGA3dLinePattern;
581
582typedef enum {
583 SVGA3D_BLENDOP_INVALID = 0,
584 SVGA3D_BLENDOP_ZERO = 1,
585 SVGA3D_BLENDOP_ONE = 2,
586 SVGA3D_BLENDOP_SRCCOLOR = 3,
587 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
588 SVGA3D_BLENDOP_SRCALPHA = 5,
589 SVGA3D_BLENDOP_INVSRCALPHA = 6,
590 SVGA3D_BLENDOP_DESTALPHA = 7,
591 SVGA3D_BLENDOP_INVDESTALPHA = 8,
592 SVGA3D_BLENDOP_DESTCOLOR = 9,
593 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
594 SVGA3D_BLENDOP_SRCALPHASAT = 11,
595 SVGA3D_BLENDOP_BLENDFACTOR = 12,
596 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
597 SVGA3D_BLENDOP_MAX
598} SVGA3dBlendOp;
599
600typedef enum {
601 SVGA3D_BLENDEQ_INVALID = 0,
602 SVGA3D_BLENDEQ_ADD = 1,
603 SVGA3D_BLENDEQ_SUBTRACT = 2,
604 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
605 SVGA3D_BLENDEQ_MINIMUM = 4,
606 SVGA3D_BLENDEQ_MAXIMUM = 5,
607 SVGA3D_BLENDEQ_MAX
608} SVGA3dBlendEquation;
609
610typedef enum {
611 SVGA3D_FRONTWINDING_INVALID = 0,
612 SVGA3D_FRONTWINDING_CW = 1,
613 SVGA3D_FRONTWINDING_CCW = 2,
614 SVGA3D_FRONTWINDING_MAX
615} SVGA3dFrontWinding;
616
617typedef enum {
618 SVGA3D_FACE_INVALID = 0,
619 SVGA3D_FACE_NONE = 1,
620 SVGA3D_FACE_FRONT = 2,
621 SVGA3D_FACE_BACK = 3,
622 SVGA3D_FACE_FRONT_BACK = 4,
623 SVGA3D_FACE_MAX
624} SVGA3dFace;
625
626/*
627 * The order and the values should not be changed
628 */
629
630typedef enum {
631 SVGA3D_CMP_INVALID = 0,
632 SVGA3D_CMP_NEVER = 1,
633 SVGA3D_CMP_LESS = 2,
634 SVGA3D_CMP_EQUAL = 3,
635 SVGA3D_CMP_LESSEQUAL = 4,
636 SVGA3D_CMP_GREATER = 5,
637 SVGA3D_CMP_NOTEQUAL = 6,
638 SVGA3D_CMP_GREATEREQUAL = 7,
639 SVGA3D_CMP_ALWAYS = 8,
640 SVGA3D_CMP_MAX
641} SVGA3dCmpFunc;
642
643/*
644 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
645 * the fog factor to be specified in the alpha component of the specular
646 * (a.k.a. secondary) vertex color.
647 */
648typedef enum {
649 SVGA3D_FOGFUNC_INVALID = 0,
650 SVGA3D_FOGFUNC_EXP = 1,
651 SVGA3D_FOGFUNC_EXP2 = 2,
652 SVGA3D_FOGFUNC_LINEAR = 3,
653 SVGA3D_FOGFUNC_PER_VERTEX = 4
654} SVGA3dFogFunction;
655
656/*
657 * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
658 * or per-pixel basis.
659 */
660typedef enum {
661 SVGA3D_FOGTYPE_INVALID = 0,
662 SVGA3D_FOGTYPE_VERTEX = 1,
663 SVGA3D_FOGTYPE_PIXEL = 2,
664 SVGA3D_FOGTYPE_MAX = 3
665} SVGA3dFogType;
666
667/*
668 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
669 * computed using the eye Z value of each pixel (or vertex), whereas range-
670 * based fog is computed using the actual distance (range) to the eye.
671 */
672typedef enum {
673 SVGA3D_FOGBASE_INVALID = 0,
674 SVGA3D_FOGBASE_DEPTHBASED = 1,
675 SVGA3D_FOGBASE_RANGEBASED = 2,
676 SVGA3D_FOGBASE_MAX = 3
677} SVGA3dFogBase;
678
679typedef enum {
680 SVGA3D_STENCILOP_INVALID = 0,
681 SVGA3D_STENCILOP_KEEP = 1,
682 SVGA3D_STENCILOP_ZERO = 2,
683 SVGA3D_STENCILOP_REPLACE = 3,
684 SVGA3D_STENCILOP_INCRSAT = 4,
685 SVGA3D_STENCILOP_DECRSAT = 5,
686 SVGA3D_STENCILOP_INVERT = 6,
687 SVGA3D_STENCILOP_INCR = 7,
688 SVGA3D_STENCILOP_DECR = 8,
689 SVGA3D_STENCILOP_MAX
690} SVGA3dStencilOp;
691
692typedef enum {
693 SVGA3D_CLIPPLANE_0 = (1 << 0),
694 SVGA3D_CLIPPLANE_1 = (1 << 1),
695 SVGA3D_CLIPPLANE_2 = (1 << 2),
696 SVGA3D_CLIPPLANE_3 = (1 << 3),
697 SVGA3D_CLIPPLANE_4 = (1 << 4),
698 SVGA3D_CLIPPLANE_5 = (1 << 5),
699} SVGA3dClipPlanes;
700
701typedef enum {
702 SVGA3D_CLEAR_COLOR = 0x1,
703 SVGA3D_CLEAR_DEPTH = 0x2,
704 SVGA3D_CLEAR_STENCIL = 0x4
705} SVGA3dClearFlag;
706
707typedef enum {
708 SVGA3D_RT_DEPTH = 0,
709 SVGA3D_RT_STENCIL = 1,
710 SVGA3D_RT_COLOR0 = 2,
711 SVGA3D_RT_COLOR1 = 3,
712 SVGA3D_RT_COLOR2 = 4,
713 SVGA3D_RT_COLOR3 = 5,
714 SVGA3D_RT_COLOR4 = 6,
715 SVGA3D_RT_COLOR5 = 7,
716 SVGA3D_RT_COLOR6 = 8,
717 SVGA3D_RT_COLOR7 = 9,
718 SVGA3D_RT_MAX,
719 SVGA3D_RT_INVALID = ((uint32)-1),
720} SVGA3dRenderTargetType;
721
722#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
723
724typedef
725union {
726 struct {
727 uint32 red : 1;
728 uint32 green : 1;
729 uint32 blue : 1;
730 uint32 alpha : 1;
731 };
732 uint32 uintValue;
733} SVGA3dColorMask;
734
735typedef enum {
736 SVGA3D_VBLEND_DISABLE = 0,
737 SVGA3D_VBLEND_1WEIGHT = 1,
738 SVGA3D_VBLEND_2WEIGHT = 2,
739 SVGA3D_VBLEND_3WEIGHT = 3,
740} SVGA3dVertexBlendFlags;
741
742typedef enum {
743 SVGA3D_WRAPCOORD_0 = 1 << 0,
744 SVGA3D_WRAPCOORD_1 = 1 << 1,
745 SVGA3D_WRAPCOORD_2 = 1 << 2,
746 SVGA3D_WRAPCOORD_3 = 1 << 3,
747 SVGA3D_WRAPCOORD_ALL = 0xF,
748} SVGA3dWrapFlags;
749
750/*
751 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
752 * must fit in a uint32.
753 */
754
755typedef enum {
756 SVGA3D_TS_INVALID = 0,
757 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
758 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
759 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
760 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
761 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
762 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
763 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
764 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
765 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
766 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
767 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
768 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
769 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
770 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
771 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
772 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
773 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
774 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
775 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
776 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
777 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
778 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
779 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
780 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
781
782
783 /*
784 * Sampler Gamma Level
785 *
786 * Sampler gamma affects the color of samples taken from the sampler. A
787 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
788 * gamma value is ignored and a linear space is used.
789 */
790
791 SVGA3D_TS_GAMMA = 25, /* float */
792 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
793 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
794 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
795 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
796 SVGA3D_TS_MAX
797} SVGA3dTextureStateName;
798
799typedef enum {
800 SVGA3D_TC_INVALID = 0,
801 SVGA3D_TC_DISABLE = 1,
802 SVGA3D_TC_SELECTARG1 = 2,
803 SVGA3D_TC_SELECTARG2 = 3,
804 SVGA3D_TC_MODULATE = 4,
805 SVGA3D_TC_ADD = 5,
806 SVGA3D_TC_ADDSIGNED = 6,
807 SVGA3D_TC_SUBTRACT = 7,
808 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
809 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
810 SVGA3D_TC_BLENDCURRENTALPHA = 10,
811 SVGA3D_TC_BLENDFACTORALPHA = 11,
812 SVGA3D_TC_MODULATE2X = 12,
813 SVGA3D_TC_MODULATE4X = 13,
814 SVGA3D_TC_DSDT = 14,
815 SVGA3D_TC_DOTPRODUCT3 = 15,
816 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
817 SVGA3D_TC_ADDSIGNED2X = 17,
818 SVGA3D_TC_ADDSMOOTH = 18,
819 SVGA3D_TC_PREMODULATE = 19,
820 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
821 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
822 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
823 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
824 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
825 SVGA3D_TC_MULTIPLYADD = 25,
826 SVGA3D_TC_LERP = 26,
827 SVGA3D_TC_MAX
828} SVGA3dTextureCombiner;
829
830#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) ((svga3d_tc_op) ? (1 << ((svga3d_tc_op) - 1)) : 0)
831
832typedef enum {
833 SVGA3D_TEX_ADDRESS_INVALID = 0,
834 SVGA3D_TEX_ADDRESS_WRAP = 1,
835 SVGA3D_TEX_ADDRESS_MIRROR = 2,
836 SVGA3D_TEX_ADDRESS_CLAMP = 3,
837 SVGA3D_TEX_ADDRESS_BORDER = 4,
838 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
839 SVGA3D_TEX_ADDRESS_EDGE = 6,
840 SVGA3D_TEX_ADDRESS_MAX
841} SVGA3dTextureAddress;
842
843/*
844 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
845 * disabled, and the rasterizer should use the magnification filter instead.
846 */
847typedef enum {
848 SVGA3D_TEX_FILTER_NONE = 0,
849 SVGA3D_TEX_FILTER_NEAREST = 1,
850 SVGA3D_TEX_FILTER_LINEAR = 2,
851 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
852 SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
853 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
854 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
855 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
856 SVGA3D_TEX_FILTER_MAX
857} SVGA3dTextureFilter;
858
859typedef enum {
860 SVGA3D_TEX_TRANSFORM_OFF = 0,
861 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
862 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
863 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
864 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
865 SVGA3D_TEX_PROJECTED = (1 << 15),
866} SVGA3dTexTransformFlags;
867
868typedef enum {
869 SVGA3D_TEXCOORD_GEN_OFF = 0,
870 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
871 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
872 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
873 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
874 SVGA3D_TEXCOORD_GEN_MAX
875} SVGA3dTextureCoordGen;
876
877/*
878 * Texture argument constants for texture combiner
879 */
880typedef enum {
881 SVGA3D_TA_INVALID = 0,
882 SVGA3D_TA_CONSTANT = 1,
883 SVGA3D_TA_PREVIOUS = 2,
884 SVGA3D_TA_DIFFUSE = 3,
885 SVGA3D_TA_TEXTURE = 4,
886 SVGA3D_TA_SPECULAR = 5,
887 SVGA3D_TA_MAX
888} SVGA3dTextureArgData;
889
890#define SVGA3D_TM_MASK_LEN 4
891
892/* Modifiers for texture argument constants defined above. */
893typedef enum {
894 SVGA3D_TM_NONE = 0,
895 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
896 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
897} SVGA3dTextureArgModifier;
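
/*
 * Usage sketch (illustrative): because an argument constant occupies the
 * low SVGA3D_TM_MASK_LEN bits, a modifier is combined with it by OR-ing.
 * For example, "the alpha channel of the bound texture" would be:
 *
 *    uint32 arg = SVGA3D_TA_TEXTURE | SVGA3D_TM_ALPHA;
 */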
898
899#define SVGA3D_INVALID_ID ((uint32)-1)
900#define SVGA3D_MAX_CLIP_PLANES 6
901
902/*
903 * This is the limit to the number of fixed-function texture
904 * transforms and texture coordinates we can support. It does *not*
905 * correspond to the number of texture image units (samplers) we
906 * support!
907 */
908#define SVGA3D_MAX_TEXTURE_COORDS 8
909
910/*
911 * Vertex declarations
912 *
913 * Notes:
914 *
915 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
916 * draw with any POSITIONT vertex arrays, the programmable vertex
917 * pipeline will be implicitly disabled. Drawing will take place as if
918 * no vertex shader was bound.
919 */
920
921typedef enum {
922 SVGA3D_DECLUSAGE_POSITION = 0,
923 SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
924 SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
925 SVGA3D_DECLUSAGE_NORMAL, /* 3 */
926 SVGA3D_DECLUSAGE_PSIZE, /* 4 */
927 SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
928 SVGA3D_DECLUSAGE_TANGENT, /* 6 */
929 SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
930 SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
931 SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
932 SVGA3D_DECLUSAGE_COLOR, /* 10 */
933 SVGA3D_DECLUSAGE_FOG, /* 11 */
934 SVGA3D_DECLUSAGE_DEPTH, /* 12 */
935 SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
936 SVGA3D_DECLUSAGE_MAX
937} SVGA3dDeclUsage;
938
939typedef enum {
940 SVGA3D_DECLMETHOD_DEFAULT = 0,
941 SVGA3D_DECLMETHOD_PARTIALU,
942 SVGA3D_DECLMETHOD_PARTIALV,
943 SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
944 SVGA3D_DECLMETHOD_UV,
945 SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
946 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
947} SVGA3dDeclMethod;
948
949typedef enum {
950 SVGA3D_DECLTYPE_FLOAT1 = 0,
951 SVGA3D_DECLTYPE_FLOAT2 = 1,
952 SVGA3D_DECLTYPE_FLOAT3 = 2,
953 SVGA3D_DECLTYPE_FLOAT4 = 3,
954 SVGA3D_DECLTYPE_D3DCOLOR = 4,
955 SVGA3D_DECLTYPE_UBYTE4 = 5,
956 SVGA3D_DECLTYPE_SHORT2 = 6,
957 SVGA3D_DECLTYPE_SHORT4 = 7,
958 SVGA3D_DECLTYPE_UBYTE4N = 8,
959 SVGA3D_DECLTYPE_SHORT2N = 9,
960 SVGA3D_DECLTYPE_SHORT4N = 10,
961 SVGA3D_DECLTYPE_USHORT2N = 11,
962 SVGA3D_DECLTYPE_USHORT4N = 12,
963 SVGA3D_DECLTYPE_UDEC3 = 13,
964 SVGA3D_DECLTYPE_DEC3N = 14,
965 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
966 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
967 SVGA3D_DECLTYPE_MAX,
968} SVGA3dDeclType;
969
970/*
971 * This structure is used for the divisor for geometry instancing;
972 * it's a direct translation of the Direct3D equivalent.
973 */
974typedef union {
975 struct {
976 /*
977 * For index data, this number represents the number of instances to draw.
978 * For instance data, this number represents the number of
979 * instances/vertex in this stream
980 */
981 uint32 count : 30;
982
983 /*
984 * Set to 1 if this stream holds the data that is repeated for
985 * every instance.
986 */
987 uint32 indexedData : 1;
988
989 /*
990 * Set to 1 if this stream holds the per-instance data.
991 */
992 uint32 instanceData : 1;
993 };
994
995 uint32 value;
996} SVGA3dVertexDivisor;
997
998typedef enum {
999 SVGA3D_PRIMITIVE_INVALID = 0,
1000 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
1001 SVGA3D_PRIMITIVE_POINTLIST = 2,
1002 SVGA3D_PRIMITIVE_LINELIST = 3,
1003 SVGA3D_PRIMITIVE_LINESTRIP = 4,
1004 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
1005 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
1006 SVGA3D_PRIMITIVE_MAX
1007} SVGA3dPrimitiveType;
1008
1009typedef enum {
1010 SVGA3D_COORDINATE_INVALID = 0,
1011 SVGA3D_COORDINATE_LEFTHANDED = 1,
1012 SVGA3D_COORDINATE_RIGHTHANDED = 2,
1013 SVGA3D_COORDINATE_MAX
1014} SVGA3dCoordinateType;
1015
1016typedef enum {
1017 SVGA3D_TRANSFORM_INVALID = 0,
1018 SVGA3D_TRANSFORM_WORLD = 1,
1019 SVGA3D_TRANSFORM_VIEW = 2,
1020 SVGA3D_TRANSFORM_PROJECTION = 3,
1021 SVGA3D_TRANSFORM_TEXTURE0 = 4,
1022 SVGA3D_TRANSFORM_TEXTURE1 = 5,
1023 SVGA3D_TRANSFORM_TEXTURE2 = 6,
1024 SVGA3D_TRANSFORM_TEXTURE3 = 7,
1025 SVGA3D_TRANSFORM_TEXTURE4 = 8,
1026 SVGA3D_TRANSFORM_TEXTURE5 = 9,
1027 SVGA3D_TRANSFORM_TEXTURE6 = 10,
1028 SVGA3D_TRANSFORM_TEXTURE7 = 11,
1029 SVGA3D_TRANSFORM_WORLD1 = 12,
1030 SVGA3D_TRANSFORM_WORLD2 = 13,
1031 SVGA3D_TRANSFORM_WORLD3 = 14,
1032 SVGA3D_TRANSFORM_MAX
1033} SVGA3dTransformType;
1034
1035typedef enum {
1036 SVGA3D_LIGHTTYPE_INVALID = 0,
1037 SVGA3D_LIGHTTYPE_POINT = 1,
1038 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
1039 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
1040 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
1041 SVGA3D_LIGHTTYPE_MAX
1042} SVGA3dLightType;
1043
1044typedef enum {
1045 SVGA3D_CUBEFACE_POSX = 0,
1046 SVGA3D_CUBEFACE_NEGX = 1,
1047 SVGA3D_CUBEFACE_POSY = 2,
1048 SVGA3D_CUBEFACE_NEGY = 3,
1049 SVGA3D_CUBEFACE_POSZ = 4,
1050 SVGA3D_CUBEFACE_NEGZ = 5,
1051} SVGA3dCubeFace;
1052
1053typedef enum {
1054 SVGA3D_SHADERTYPE_INVALID = 0,
1055 SVGA3D_SHADERTYPE_MIN = 1,
1056 SVGA3D_SHADERTYPE_VS = 1,
1057 SVGA3D_SHADERTYPE_PS = 2,
1058 SVGA3D_SHADERTYPE_MAX = 3,
1059 SVGA3D_SHADERTYPE_GS = 3,
1060} SVGA3dShaderType;
1061
1062#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
1063
1064typedef enum {
1065 SVGA3D_CONST_TYPE_FLOAT = 0,
1066 SVGA3D_CONST_TYPE_INT = 1,
1067 SVGA3D_CONST_TYPE_BOOL = 2,
1068 SVGA3D_CONST_TYPE_MAX
1069} SVGA3dShaderConstType;
1070
1071#define SVGA3D_MAX_SURFACE_FACES 6
1072
1073typedef enum {
1074 SVGA3D_STRETCH_BLT_POINT = 0,
1075 SVGA3D_STRETCH_BLT_LINEAR = 1,
1076 SVGA3D_STRETCH_BLT_MAX
1077} SVGA3dStretchBltMode;
1078
1079typedef enum {
1080 SVGA3D_QUERYTYPE_OCCLUSION = 0,
1081 SVGA3D_QUERYTYPE_MAX
1082} SVGA3dQueryType;
1083
1084typedef enum {
1085 SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
1086 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
1087 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
1088 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
1089} SVGA3dQueryState;
1090
1091typedef enum {
1092 SVGA3D_WRITE_HOST_VRAM = 1,
1093 SVGA3D_READ_HOST_VRAM = 2,
1094} SVGA3dTransferType;
1095
1096/*
1097 * The maximum number of vertex arrays we're guaranteed to support in
1098 * SVGA_3D_CMD_DRAWPRIMITIVES.
1099 */
1100#define SVGA3D_MAX_VERTEX_ARRAYS 32
1101
1102/*
1103 * The maximum number of primitive ranges we're guaranteed to support
1104 * in SVGA_3D_CMD_DRAWPRIMITIVES.
1105 */
1106#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
1107
1108/*
1109 * Identifiers for commands in the command FIFO.
1110 *
1111 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
1112 * the SVGA3D protocol and remain reserved; they should not be used in the
1113 * future.
1114 *
1115 * IDs between 1040 and 1999 (inclusive) are available for use by the
1116 * current SVGA3D protocol.
1117 *
1118 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
1119 * and up.
1120 */
1121
1122#define SVGA_3D_CMD_LEGACY_BASE 1000
1123#define SVGA_3D_CMD_BASE 1040
1124
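/*
 * Hypothetical helper (not part of the protocol): tests whether a FIFO
 * command ID lies in the range reserved for the current SVGA3D protocol,
 * per the ranges documented above (1040..1999 inclusive).
 */
static inline uint32
svga3dCmdIdIsSVGA3D(uint32 id)
{
   return id >= SVGA_3D_CMD_BASE && id <= 1999;
}
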
1125#define SVGA_3D_CMD_SURFACE_DEFINE (SVGA_3D_CMD_BASE + 0) /* Deprecated */
1126#define SVGA_3D_CMD_SURFACE_DESTROY (SVGA_3D_CMD_BASE + 1)
1127#define SVGA_3D_CMD_SURFACE_COPY (SVGA_3D_CMD_BASE + 2)
1128#define SVGA_3D_CMD_SURFACE_STRETCHBLT (SVGA_3D_CMD_BASE + 3)
1129#define SVGA_3D_CMD_SURFACE_DMA (SVGA_3D_CMD_BASE + 4)
1130#define SVGA_3D_CMD_CONTEXT_DEFINE (SVGA_3D_CMD_BASE + 5)
1131#define SVGA_3D_CMD_CONTEXT_DESTROY (SVGA_3D_CMD_BASE + 6)
1132#define SVGA_3D_CMD_SETTRANSFORM (SVGA_3D_CMD_BASE + 7)
1133#define SVGA_3D_CMD_SETZRANGE (SVGA_3D_CMD_BASE + 8)
1134#define SVGA_3D_CMD_SETRENDERSTATE (SVGA_3D_CMD_BASE + 9)
1135#define SVGA_3D_CMD_SETRENDERTARGET (SVGA_3D_CMD_BASE + 10)
1136#define SVGA_3D_CMD_SETTEXTURESTATE (SVGA_3D_CMD_BASE + 11)
1137#define SVGA_3D_CMD_SETMATERIAL (SVGA_3D_CMD_BASE + 12)
1138#define SVGA_3D_CMD_SETLIGHTDATA (SVGA_3D_CMD_BASE + 13)
1139#define SVGA_3D_CMD_SETLIGHTENABLED (SVGA_3D_CMD_BASE + 14)
1140#define SVGA_3D_CMD_SETVIEWPORT (SVGA_3D_CMD_BASE + 15)
1141#define SVGA_3D_CMD_SETCLIPPLANE (SVGA_3D_CMD_BASE + 16)
1142#define SVGA_3D_CMD_CLEAR (SVGA_3D_CMD_BASE + 17)
1143#define SVGA_3D_CMD_PRESENT (SVGA_3D_CMD_BASE + 18) /* Deprecated */
1144#define SVGA_3D_CMD_SHADER_DEFINE (SVGA_3D_CMD_BASE + 19)
1145#define SVGA_3D_CMD_SHADER_DESTROY (SVGA_3D_CMD_BASE + 20)
1146#define SVGA_3D_CMD_SET_SHADER (SVGA_3D_CMD_BASE + 21)
1147#define SVGA_3D_CMD_SET_SHADER_CONST (SVGA_3D_CMD_BASE + 22)
1148#define SVGA_3D_CMD_DRAW_PRIMITIVES (SVGA_3D_CMD_BASE + 23)
1149#define SVGA_3D_CMD_SETSCISSORRECT (SVGA_3D_CMD_BASE + 24)
1150#define SVGA_3D_CMD_BEGIN_QUERY (SVGA_3D_CMD_BASE + 25)
1151#define SVGA_3D_CMD_END_QUERY (SVGA_3D_CMD_BASE + 26)
1152#define SVGA_3D_CMD_WAIT_FOR_QUERY (SVGA_3D_CMD_BASE + 27)
1153#define SVGA_3D_CMD_PRESENT_READBACK (SVGA_3D_CMD_BASE + 28) /* Deprecated */
1154#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN (SVGA_3D_CMD_BASE + 29)
1155#define SVGA_3D_CMD_SURFACE_DEFINE_V2 (SVGA_3D_CMD_BASE + 30)
1156#define SVGA_3D_CMD_GENERATE_MIPMAPS (SVGA_3D_CMD_BASE + 31)
1157#define SVGA_3D_CMD_ACTIVATE_SURFACE (SVGA_3D_CMD_BASE + 40)
1158#define SVGA_3D_CMD_DEACTIVATE_SURFACE (SVGA_3D_CMD_BASE + 41)
1159#define SVGA_3D_CMD_SCREEN_DMA 1082
1160#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
1161#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
1162
1163#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
1164#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
1165#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
1166#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
1167#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
1168#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
1169
1170#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
1171#define SVGA_3D_CMD_READBACK_OTABLE 1092
1172
1173#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
1174#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
1175#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
1176#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
1177
1178#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
1179#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
1180#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
1181#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
1182#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
1183#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
1184#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
1185#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
1186#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
1187#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
1188
1189#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
1190#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
1191#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
1192#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
1193#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
1194
1195#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
1196#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
1197#define SVGA_3D_CMD_BIND_GB_SHADER 1114
1198
1199#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
1200
1201#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
1202#define SVGA_3D_CMD_END_GB_QUERY 1117
1203#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
1204
1205#define SVGA_3D_CMD_NOP 1119
1206
1207#define SVGA_3D_CMD_ENABLE_GART 1120
1208#define SVGA_3D_CMD_DISABLE_GART 1121
1209#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
1210#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
1211
1212#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
1213#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
1214#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
1215#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
1216
1217#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
1218#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
1219
1220#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
1221#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
1222#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
1223#define SVGA_3D_CMD_GB_MOB_FENCE 1133
1224#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
1225#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
1226#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
1227#define SVGA_3D_CMD_NOP_ERROR 1137
1228
1229#define SVGA_3D_CMD_RESERVED1 1138
1230#define SVGA_3D_CMD_RESERVED2 1139
1231#define SVGA_3D_CMD_RESERVED3 1140
1232#define SVGA_3D_CMD_RESERVED4 1141
1233#define SVGA_3D_CMD_RESERVED5 1142
1234
1235#define SVGA_3D_CMD_MAX 1142
1236#define SVGA_3D_CMD_FUTURE_MAX 3000
1237
1238/*
1239 * Common substructures used in multiple FIFO commands:
1240 */
1241
1242typedef struct {
1243 union {
1244 struct {
1245 uint16 function; /* SVGA3dFogFunction */
1246 uint8 type; /* SVGA3dFogType */
1247 uint8 base; /* SVGA3dFogBase */
1248 };
1249 uint32 uintValue;
1250 };
1251} SVGA3dFogMode;
1252
1253/*
1254 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1255 * is a surface ID as well as face/mipmap indices.
1256 */
1257
1258typedef
1259struct SVGA3dSurfaceImageId {
1260 uint32 sid;
1261 uint32 face;
1262 uint32 mipmap;
1263} SVGA3dSurfaceImageId;
1264
1265typedef
1266struct SVGA3dGuestImage {
1267 SVGAGuestPtr ptr;
1268
1269 /*
1270 * A note on interpretation of pitch: This value of pitch is the
1271 * number of bytes between vertically adjacent image
1272 * blocks. Normally this is the number of bytes between the first
1273 * pixel of two adjacent scanlines. With compressed textures,
1274 * however, this may represent the number of bytes between
1275 * compression blocks rather than between rows of pixels.
1276 *
1277 * XXX: Compressed textures currently must be tightly packed in guest memory.
1278 *
1279 * If the image is 1-dimensional, pitch is ignored.
1280 *
1281 * If 'pitch' is zero, the SVGA3D device calculates a pitch value
1282 * assuming each row of blocks is tightly packed.
1283 */
1284 uint32 pitch;
1285} SVGA3dGuestImage;
1286
1287
1288/*
1289 * FIFO command format definitions:
1290 */
1291
1292/*
1293 * The data size header following cmdNum for every 3d command
1294 */
1295typedef
1296struct {
1297 uint32 id;
1298 uint32 size;
1299} SVGA3dCmdHeader;
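
/*
 * Usage sketch (illustrative only): every 3D command in the FIFO is an
 * SVGA3dCmdHeader followed immediately by 'size' bytes of body. A
 * fixed-size command such as SVGA_3D_CMD_SURFACE_DESTROY could be
 * assembled as:
 *
 *    struct {
 *       SVGA3dCmdHeader header;
 *       SVGA3dCmdDestroySurface body;
 *    } cmd;
 *
 *    cmd.header.id = SVGA_3D_CMD_SURFACE_DESTROY;
 *    cmd.header.size = sizeof(cmd.body);
 *    cmd.body.sid = sid;
 */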
1300
1301/*
1302 * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
1303 * optional mipmaps and cube faces.
1304 */
1305
1306typedef
1307struct {
1308 uint32 width;
1309 uint32 height;
1310 uint32 depth;
1311} SVGA3dSize;
1312
1313typedef enum {
1314 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
1315 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
1316 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
1317 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
1318 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
1319 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
1320 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
1321 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
1322 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
1323 SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
1324 SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
1325} SVGA3dSurfaceFlags;
1326
1327typedef
1328struct {
1329 uint32 numMipLevels;
1330} SVGA3dSurfaceFace;
1331
1332typedef
1333struct {
1334 uint32 sid;
1335 SVGA3dSurfaceFlags surfaceFlags;
1336 SVGA3dSurfaceFormat format;
1337 /*
1338 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
1339 * structures must have the same value of numMipLevels field.
1340 * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
1341 * numMipLevels set to 0.
1342 */
1343 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1344 /*
1345 * Followed by an SVGA3dSize structure for each mip level in each face.
1346 *
1347 * A note on surface sizes: Sizes are always specified in pixels,
1348 * even if the true surface size is not a multiple of the minimum
1349 * block size of the surface's format. For example, a 3x3x1 DXT1
1350 * compressed texture would actually be stored as a 4x4x1 image in
1351 * memory.
1352 */
1353} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
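
/*
 * Sizing sketch (illustrative only): since one SVGA3dSize follows the
 * structure for each mip level of each face, the body size of a
 * SVGA_3D_CMD_SURFACE_DEFINE command must be computed from the face
 * array, e.g.:
 *
 *    uint32 i, numSizes = 0;
 *
 *    for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
 *       numSizes += cmd->face[i].numMipLevels;
 *    header.size = sizeof(SVGA3dCmdDefineSurface) +
 *                  numSizes * sizeof(SVGA3dSize);
 */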
1354
1355typedef
1356struct {
1357 uint32 sid;
1358 SVGA3dSurfaceFlags surfaceFlags;
1359 SVGA3dSurfaceFormat format;
1360 /*
1361 * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
1362 * structures must have the same value of numMipLevels field.
1363 * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
1364 * numMipLevels set to 0.
1365 */
1366 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1367 uint32 multisampleCount;
1368 SVGA3dTextureFilter autogenFilter;
1369 /*
1370 * Followed by an SVGA3dSize structure for each mip level in each face.
1371 *
1372 * A note on surface sizes: Sizes are always specified in pixels,
1373 * even if the true surface size is not a multiple of the minimum
1374 * block size of the surface's format. For example, a 3x3x1 DXT1
1375 * compressed texture would actually be stored as a 4x4x1 image in
1376 * memory.
1377 */
1378} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
1379
1380typedef
1381struct {
1382 uint32 sid;
1383} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
1384
1385typedef
1386struct {
1387 uint32 cid;
1388} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
1389
1390typedef
1391struct {
1392 uint32 cid;
1393} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
1394
1395typedef
1396struct {
1397 uint32 cid;
1398 SVGA3dClearFlag clearFlag;
1399 uint32 color;
1400 float depth;
1401 uint32 stencil;
1402 /* Followed by variable number of SVGA3dRect structures */
1403} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
1404
1405typedef
1406struct SVGA3dCopyRect {
1407 uint32 x;
1408 uint32 y;
1409 uint32 w;
1410 uint32 h;
1411 uint32 srcx;
1412 uint32 srcy;
1413} SVGA3dCopyRect;
1414
1415typedef
1416struct SVGA3dCopyBox {
1417 uint32 x;
1418 uint32 y;
1419 uint32 z;
1420 uint32 w;
1421 uint32 h;
1422 uint32 d;
1423 uint32 srcx;
1424 uint32 srcy;
1425 uint32 srcz;
1426} SVGA3dCopyBox;
1427
1428typedef
1429struct {
1430 uint32 x;
1431 uint32 y;
1432 uint32 w;
1433 uint32 h;
1434} SVGA3dRect;
1435
1436typedef
1437struct {
1438 uint32 x;
1439 uint32 y;
1440 uint32 z;
1441 uint32 w;
1442 uint32 h;
1443 uint32 d;
1444} SVGA3dBox;
1445
1446typedef
1447struct {
1448 uint32 x;
1449 uint32 y;
1450 uint32 z;
1451} SVGA3dPoint;
1452
1453typedef
1454struct {
1455 SVGA3dLightType type;
1456 SVGA3dBool inWorldSpace;
1457 float diffuse[4];
1458 float specular[4];
1459 float ambient[4];
1460 float position[4];
1461 float direction[4];
1462 float range;
1463 float falloff;
1464 float attenuation0;
1465 float attenuation1;
1466 float attenuation2;
1467 float theta;
1468 float phi;
1469} SVGA3dLightData;
1470
1471typedef
1472struct {
1473 uint32 sid;
1474 /* Followed by variable number of SVGA3dCopyRect structures */
1475} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
1476
1477typedef
1478struct {
1479 SVGA3dRenderStateName state;
1480 union {
1481 uint32 uintValue;
1482 float floatValue;
1483 };
1484} SVGA3dRenderState;
1485
1486typedef
1487struct {
1488 uint32 cid;
1489 /* Followed by variable number of SVGA3dRenderState structures */
1490} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
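
/*
 * Usage sketch (illustrative only): the body is the cid followed by any
 * number of SVGA3dRenderState entries; enabling depth test and depth
 * writes in one command might look like:
 *
 *    struct {
 *       SVGA3dCmdHeader header;
 *       SVGA3dCmdSetRenderState body;
 *       SVGA3dRenderState rs[2];
 *    } cmd;
 *
 *    cmd.header.id = SVGA_3D_CMD_SETRENDERSTATE;
 *    cmd.header.size = sizeof(cmd.body) + sizeof(cmd.rs);
 *    cmd.body.cid = cid;
 *    cmd.rs[0].state = SVGA3D_RS_ZENABLE;
 *    cmd.rs[0].uintValue = 1;
 *    cmd.rs[1].state = SVGA3D_RS_ZWRITEENABLE;
 *    cmd.rs[1].uintValue = 1;
 */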
1491
1492typedef
1493struct {
1494 uint32 cid;
1495 SVGA3dRenderTargetType type;
1496 SVGA3dSurfaceImageId target;
1497} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
1498
1499typedef
1500struct {
1501 SVGA3dSurfaceImageId src;
1502 SVGA3dSurfaceImageId dest;
1503 /* Followed by variable number of SVGA3dCopyBox structures */
1504} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
1505
1506typedef
1507struct {
1508 SVGA3dSurfaceImageId src;
1509 SVGA3dSurfaceImageId dest;
1510 SVGA3dBox boxSrc;
1511 SVGA3dBox boxDest;
1512 SVGA3dStretchBltMode mode;
1513} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
1514
1515typedef
1516struct {
1517 /*
1518 * If the discard flag is present in a surface DMA operation, the host may
1519 * discard the contents of the current mipmap level and face of the target
1520 * surface before applying the surface DMA contents.
1521 */
1522 uint32 discard : 1;
1523
1524 /*
1525 * If the unsynchronized flag is present, the host may perform this upload
1526 * without syncing to pending reads on this surface.
1527 */
1528 uint32 unsynchronized : 1;
1529
1530 /*
1531 * Guests *MUST* set the reserved bits to 0 before submitting the command
1532 * suffix as future flags may occupy these bits.
1533 */
1534 uint32 reserved : 30;
1535} SVGA3dSurfaceDMAFlags;
1536
1537typedef
1538struct {
1539 SVGA3dGuestImage guest;
1540 SVGA3dSurfaceImageId host;
1541 SVGA3dTransferType transfer;
1542 /*
1543 * Followed by variable number of SVGA3dCopyBox structures. For consistency
1544 * in all clipping logic and coordinate translation, we define the
1545 * "source" in each copyBox as the guest image and the
1546 * "destination" as the host image, regardless of transfer
1547 * direction.
1548 *
1549 * For efficiency, the SVGA3D device is free to copy more data than
1550 * specified. For example, it may round copy boxes outwards such
1551 * that they lie on particular alignment boundaries.
1552 */
1553} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
1554
1555/*
1556 * SVGA3dCmdSurfaceDMASuffix --
1557 *
1558 * This is a command suffix that will appear after a SurfaceDMA command in
1559 * the FIFO. It contains some extra information that hosts may use to
1560 * optimize performance or protect the guest. This suffix exists to preserve
1561 * backwards compatibility while also allowing for new functionality to be
1562 * implemented.
1563 */
1564
1565typedef
1566struct {
1567 uint32 suffixSize;
1568
1569 /*
1570 * The maximum offset is used to determine the maximum offset from the
1571 * guestPtr base address that will be accessed or written to during this
1572 * surfaceDMA. If the suffix is supported, the host will respect this
1573 * boundary while performing surface DMAs.
1574 *
1575 * Defaults to MAX_UINT32
1576 */
1577 uint32 maximumOffset;
1578
1579 /*
1580 * A set of flags that describes optimizations that the host may perform
1581 * while performing this surface DMA operation. The guest should never rely
1582 * on behaviour that is different when these flags are set for correctness.
1583 *
1584 * Defaults to 0
1585 */
1586 SVGA3dSurfaceDMAFlags flags;
1587} SVGA3dCmdSurfaceDMASuffix;
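
/*
 * Usage sketch (illustrative only; 'guestBufferSize' is a hypothetical
 * name): the suffix is appended after the copy boxes of a SURFACE_DMA
 * command, and suffixSize lets the host skip fields added by newer
 * versions. The reserved flag bits must be zero:
 *
 *    SVGA3dCmdSurfaceDMASuffix suffix;
 *
 *    suffix.suffixSize = sizeof(suffix);
 *    suffix.maximumOffset = guestBufferSize - 1;
 *    suffix.flags.discard = 1;          (whole level will be overwritten)
 *    suffix.flags.unsynchronized = 0;
 *    suffix.flags.reserved = 0;
 */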
1588
1589/*
1590 * SVGA_3D_CMD_DRAW_PRIMITIVES --
1591 *
1592 * This command is the SVGA3D device's generic drawing entry point.
1593 * It can draw multiple ranges of primitives, optionally using an
1594 * index buffer, using an arbitrary collection of vertex buffers.
1595 *
1596 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
1597 * during this draw call. The declarations specify which surface
1598 * the vertex data lives in, what that vertex data is used for,
1599 * and how to interpret it.
1600 *
1601 * Each SVGA3dPrimitiveRange defines a collection of primitives
1602 * to render using the same vertex arrays. An index buffer is
1603 * optional.
1604 */
1605
1606typedef
1607struct {
1608 /*
1609 * A range hint is an optional specification for the range of indices
1610 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
1611 * that the entire array will be used.
1612 *
1613 * These are only hints. The SVGA3D device may use them for
1614 * performance optimization if possible, but it's also allowed to
1615 * ignore these values.
1616 */
1617 uint32 first;
1618 uint32 last;
1619} SVGA3dArrayRangeHint;
1620
1621typedef
1622struct {
1623 /*
1624 * Define the origin and shape of a vertex or index array. Both
1625 * 'offset' and 'stride' are in bytes. The provided surface will be
1626 * reinterpreted as a flat array of bytes in the same format used
1627 * by surface DMA operations. To avoid unnecessary conversions, the
1628 * surface should be created with the SVGA3D_BUFFER format.
1629 *
1630 * Index 0 in the array starts 'offset' bytes into the surface.
1631 * Index 1 begins at byte 'offset + stride', etc. Array indices may
1632 * not be negative.
1633 */
1634 uint32 surfaceId;
1635 uint32 offset;
1636 uint32 stride;
1637} SVGA3dArray;
1638
1639typedef
1640struct {
1641 /*
1642 * Describe a vertex array's data type, and define how it is to be
1643 * used by the fixed function pipeline or the vertex shader. It
1644 * isn't useful to have two VertexDecls with the same
1645 * VertexArrayIdentity in one draw call.
1646 */
1647 SVGA3dDeclType type;
1648 SVGA3dDeclMethod method;
1649 SVGA3dDeclUsage usage;
1650 uint32 usageIndex;
1651} SVGA3dVertexArrayIdentity;
1652
1653typedef
1654struct {
1655 SVGA3dVertexArrayIdentity identity;
1656 SVGA3dArray array;
1657 SVGA3dArrayRangeHint rangeHint;
1658} SVGA3dVertexDecl;
1659
1660typedef
1661struct {
1662 /*
1663 * Define a group of primitives to render, from sequential indices.
1664 *
1665 * The values of 'primType' and 'primitiveCount' imply the
1666 * total number of vertices that will be rendered.
1667 */
1668 SVGA3dPrimitiveType primType;
1669 uint32 primitiveCount;
1670
1671 /*
1672 * Optional index buffer. If indexArray.surfaceId is
1673 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
1674 * without an index buffer is identical to rendering with an index
1675 * buffer containing the sequence [0, 1, 2, 3, ...].
1676 *
1677 * If an index buffer is in use, indexWidth specifies the width in
1678 * bytes of each index value. It must be less than or equal to
1679 * indexArray.stride.
1680 *
1681 * (Currently, the SVGA3D device requires index buffers to be tightly
1682 * packed. In other words, indexWidth == indexArray.stride)
1683 */
1684 SVGA3dArray indexArray;
1685 uint32 indexWidth;
1686
1687 /*
1688 * Optional index bias. This number is added to all indices from
1689 * indexArray before they are used as vertex array indices. This
1690 * can be used in multiple ways:
1691 *
1692 * - When not using an indexArray, this bias can be used to
1693 * specify where in the vertex arrays to begin rendering.
1694 *
1695 * - A positive number here is equivalent to increasing the
1696 * offset in each vertex array.
1697 *
1698 * - A negative number can be used to render using a small
1699 * vertex array and an index buffer that contains large
1700 * values. This may be used by some applications that
1701 * crop a vertex buffer without modifying their index
1702 * buffer.
1703 *
1704 * Note that rendering with a negative bias value may be slower and
1705 * use more memory than rendering with a positive or zero bias.
1706 */
1707 int32 indexBias;
1708} SVGA3dPrimitiveRange;
1709
1710typedef
1711struct {
1712 uint32 cid;
1713 uint32 numVertexDecls;
1714 uint32 numRanges;
1715
1716 /*
1717 * There are two variable size arrays after the
1718 * SVGA3dCmdDrawPrimitives structure. In order,
1719 * they are:
1720 *
1721 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
1722 * SVGA3D_MAX_VERTEX_ARRAYS;
1723 * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
1724 * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
1725 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
1726 * the frequency divisor for the corresponding vertex decl).
1727 */
1728} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
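
/*
 * Layout sketch (illustrative only): a minimal non-indexed draw with one
 * vertex declaration and one primitive range could be assembled as:
 *
 *    struct {
 *       SVGA3dCmdHeader header;
 *       SVGA3dCmdDrawPrimitives body;
 *       SVGA3dVertexDecl decl;       (array 1, quantity numVertexDecls)
 *       SVGA3dPrimitiveRange range;  (array 2, quantity numRanges)
 *    } cmd;
 *
 *    cmd.header.id = SVGA_3D_CMD_DRAW_PRIMITIVES;
 *    cmd.header.size = sizeof(cmd) - sizeof(cmd.header);
 *    cmd.body.cid = cid;
 *    cmd.body.numVertexDecls = 1;
 *    cmd.body.numRanges = 1;
 *    ... fill in cmd.decl per SVGA3dVertexDecl above ...
 *    cmd.range.primType = SVGA3D_PRIMITIVE_TRIANGLELIST;
 *    cmd.range.primitiveCount = numTriangles;
 *    cmd.range.indexArray.surfaceId = SVGA3D_INVALID_ID;
 *    cmd.range.indexWidth = 0;
 *    cmd.range.indexBias = 0;
 */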
1729
1730typedef
1731struct {
1732 uint32 stage;
1733 SVGA3dTextureStateName name;
1734 union {
1735 uint32 value;
1736 float floatValue;
1737 };
1738} SVGA3dTextureState;
1739
1740typedef
1741struct {
1742 uint32 cid;
1743 /* Followed by variable number of SVGA3dTextureState structures */
1744} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
1745
1746typedef
1747struct {
1748 uint32 cid;
1749 SVGA3dTransformType type;
1750 float matrix[16];
1751} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
1752
1753typedef
1754struct {
1755 float min;
1756 float max;
1757} SVGA3dZRange;
1758
1759typedef
1760struct {
1761 uint32 cid;
1762 SVGA3dZRange zRange;
1763} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
1764
1765typedef
1766struct {
1767 float diffuse[4];
1768 float ambient[4];
1769 float specular[4];
1770 float emissive[4];
1771 float shininess;
1772} SVGA3dMaterial;
1773
1774typedef
1775struct {
1776 uint32 cid;
1777 SVGA3dFace face;
1778 SVGA3dMaterial material;
1779} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
1780
1781typedef
1782struct {
1783 uint32 cid;
1784 uint32 index;
1785 SVGA3dLightData data;
1786} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
1787
1788typedef
1789struct {
1790 uint32 cid;
1791 uint32 index;
1792 uint32 enabled;
1793} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
1794
1795typedef
1796struct {
1797 uint32 cid;
1798 SVGA3dRect rect;
1799} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
1800
1801typedef
1802struct {
1803 uint32 cid;
1804 SVGA3dRect rect;
1805} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
1806
1807typedef
1808struct {
1809 uint32 cid;
1810 uint32 index;
1811 float plane[4];
1812} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
1813
1814typedef
1815struct {
1816 uint32 cid;
1817 uint32 shid;
1818 SVGA3dShaderType type;
1819 /* Followed by variable number of DWORDs for shader bytecode */
1820} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
1821
1822typedef
1823struct {
1824 uint32 cid;
1825 uint32 shid;
1826 SVGA3dShaderType type;
1827} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
1828
1829typedef
1830struct {
1831 uint32 cid;
1832 uint32 reg; /* register number */
1833 SVGA3dShaderType type;
1834 SVGA3dShaderConstType ctype;
1835 uint32 values[4];
1836} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
1837
1838typedef
1839struct {
1840 uint32 cid;
1841 SVGA3dShaderType type;
1842 uint32 shid;
1843} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
1844
1845typedef
1846struct {
1847 uint32 cid;
1848 SVGA3dQueryType type;
1849} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1850
1851typedef
1852struct {
1853 uint32 cid;
1854 SVGA3dQueryType type;
1855 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1856} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1857
1858typedef
1859struct {
1860 uint32 cid; /* Same parameters passed to END_QUERY */
1861 SVGA3dQueryType type;
1862 SVGAGuestPtr guestResult;
1863} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1864
1865typedef
1866struct {
1867 uint32 totalSize; /* Set by guest before query is ended. */
1868 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1869 union { /* Set by host on exit from PENDING state */
1870 uint32 result32;
1871 };
1872} SVGA3dQueryResult;
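
/*
 * Usage sketch (illustrative only): the guest initializes the result
 * structure before ending the query, then waits for the host to move the
 * state out of PENDING:
 *
 *    result->totalSize = sizeof(*result);
 *    result->state = SVGA3D_QUERYSTATE_PENDING;
 *    ... submit SVGA_3D_CMD_END_QUERY pointing guestResult at 'result' ...
 *    ... submit SVGA_3D_CMD_WAIT_FOR_QUERY and sync, or poll: ...
 *    while (result->state == SVGA3D_QUERYSTATE_PENDING)
 *       ;
 *    if (result->state == SVGA3D_QUERYSTATE_SUCCEEDED)
 *       value = result->result32;
 */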
1873
1874/*
1875 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1876 *
1877 * This is a blit from an SVGA3D surface to a Screen Object. Just
1878 * like GMR-to-screen blits, this blit may be directed at a
1879 * specific screen or to the virtual coordinate space.
1880 *
1881 * The blit copies from a rectangular region of an SVGA3D surface
1882 * image to a rectangular region of a screen or screens.
1883 *
1884 * This command takes an optional variable-length list of clipping
1885 * rectangles after the body of the command. If no rectangles are
1886 * specified, there is no clipping region. The entire destRect is
1887 * drawn to. If one or more rectangles are included, they describe
1888 * a clipping region. The clip rectangle coordinates are measured
1889 * relative to the top-left corner of destRect.
1890 *
1891 * This clipping region serves multiple purposes:
1892 *
1893 * - It can be used to perform an irregularly shaped blit more
1894 * efficiently than by issuing many separate blit commands.
1895 *
1896 * - It is equivalent to allowing blits with non-integer
1897 * source coordinates. You could blit just one half-pixel
1898 * of a source, for example, by specifying a larger
1899 * destination rectangle than you need, then removing
1900 * part of it using a clip rectangle.
1901 *
1902 * Availability:
1903 * SVGA_FIFO_CAP_SCREEN_OBJECT
1904 *
1905 * Limitations:
1906 *
1907 * - Currently, no backend supports blits from a mipmap or face
1908 * other than the first one.
1909 */
1910
1911typedef
1912struct {
1913 SVGA3dSurfaceImageId srcImage;
1914 SVGASignedRect srcRect;
1915 uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
1916 SVGASignedRect destRect; /* Supports scaling if src/dest differ in size */
1917 /* Clipping: zero or more SVGASignedRects follow */
1918} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
1919
1920typedef
1921struct {
1922 uint32 sid;
1923 SVGA3dTextureFilter filter;
1924} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
1925
1926
1927/*
1928 * Guest-backed surface definitions.
1929 */
1930
1931typedef uint32 SVGAMobId;
1932
1933typedef enum SVGAMobFormat {
1934 SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
1935 SVGA3D_MOBFMT_PTDEPTH_0 = 0,
1936 SVGA3D_MOBFMT_PTDEPTH_1 = 1,
1937 SVGA3D_MOBFMT_PTDEPTH_2 = 2,
1938 SVGA3D_MOBFMT_RANGE = 3,
1939 SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
1940 SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
1941 SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
1942 SVGA3D_MOBFMT_MAX,
1943} SVGAMobFormat;
1944
1945/*
1946 * Sizes of opaque types.
1947 */
1948
1949#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
1950#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
1951#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
1952#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
1953#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
1954#define SVGA3D_CONTEXT_DATA_SIZE 16384
1955
1956/*
1957 * SVGA3dCmdSetOTableBase --
1958 *
1959 * This command allows the guest to specify the base PPN of the
1960 * specified object table.
1961 */
1962
1963typedef enum {
1964 SVGA_OTABLE_MOB = 0,
1965 SVGA_OTABLE_MIN = 0,
1966 SVGA_OTABLE_SURFACE = 1,
1967 SVGA_OTABLE_CONTEXT = 2,
1968 SVGA_OTABLE_SHADER = 3,
1969 SVGA_OTABLE_SCREEN_TARGET = 4,
1970 SVGA_OTABLE_DX9_MAX = 5,
1971 SVGA_OTABLE_MAX = 8
1972} SVGAOTableType;
1973
1974typedef
1975struct {
1976 SVGAOTableType type;
1977 PPN baseAddress;
1978 uint32 sizeInBytes;
1979 uint32 validSizeInBytes;
1980 SVGAMobFormat ptDepth;
1981} __packed
1982SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
1983
1984typedef
1985struct {
1986 SVGAOTableType type;
1987 PPN64 baseAddress;
1988 uint32 sizeInBytes;
1989 uint32 validSizeInBytes;
1990 SVGAMobFormat ptDepth;
1991} __packed
1992SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
1993
1994typedef
1995struct {
1996 SVGAOTableType type;
1997} __packed
1998SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
1999
2000/*
2001 * Define a memory object (Mob) in the OTable.
2002 */
2003
2004typedef
2005struct SVGA3dCmdDefineGBMob {
2006 SVGAMobId mobid;
2007 SVGAMobFormat ptDepth;
2008 PPN base;
2009 uint32 sizeInBytes;
2010} __packed
2011SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
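
/*
 * Setup sketch (assumption, modeled on how the vmwgfx driver builds mob
 * page tables; 'numDataPages', 'dataPagePPN' and 'pageTablePPN' are
 * hypothetical names): a mob that fits in one page needs no page table,
 * so 'base' may point directly at the data page:
 *
 *    cmd.mobid = mobid;
 *    cmd.sizeInBytes = sizeInBytes;
 *    if (numDataPages == 1) {
 *       cmd.ptDepth = SVGA3D_MOBFMT_PTDEPTH_0;
 *       cmd.base = dataPagePPN;
 *    } else {
 *       cmd.ptDepth = SVGA3D_MOBFMT_PTDEPTH_1;
 *       cmd.base = pageTablePPN;   (one level of page table)
 *    }
 */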
2012
2013
2014/*
2015 * Destroys an object in the OTable.
2016 */
2017
2018typedef
2019struct SVGA3dCmdDestroyGBMob {
2020 SVGAMobId mobid;
2021} __packed
2022SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
2023
2024/*
2025 * Redefine an object in the OTable.
2026 */
2027
2028typedef
2029struct SVGA3dCmdRedefineGBMob {
2030 SVGAMobId mobid;
2031 SVGAMobFormat ptDepth;
2032 PPN base;
2033 uint32 sizeInBytes;
2034} __packed
2035SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
2036
2037/*
2038 * Define a memory object (Mob) in the OTable with a PPN64 base.
2039 */
2040
2041typedef
2042struct SVGA3dCmdDefineGBMob64 {
2043 SVGAMobId mobid;
2044 SVGAMobFormat ptDepth;
2045 PPN64 base;
2046 uint32 sizeInBytes;
2047} __packed
2048SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
2049
2050/*
2051 * Redefine an object in the OTable with PPN64 base.
2052 */
2053
2054typedef
2055struct SVGA3dCmdRedefineGBMob64 {
2056 SVGAMobId mobid;
2057 SVGAMobFormat ptDepth;
2058 PPN64 base;
2059 uint32 sizeInBytes;
2060} __packed
2061SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
2062
2063/*
2064 * Notification that the page tables have been modified.
2065 */
2066
2067typedef
2068struct SVGA3dCmdUpdateGBMobMapping {
2069 SVGAMobId mobid;
2070} __packed
2071SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
2072
2073/*
2074 * Define a guest-backed surface.
2075 */
2076
2077typedef
2078struct SVGA3dCmdDefineGBSurface {
2079 uint32 sid;
2080 SVGA3dSurfaceFlags surfaceFlags;
2081 SVGA3dSurfaceFormat format;
2082 uint32 numMipLevels;
2083 uint32 multisampleCount;
2084 SVGA3dTextureFilter autogenFilter;
2085 SVGA3dSize size;
2086} __packed
2087SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
2088
2089/*
2090 * Destroy a guest-backed surface.
2091 */
2092
2093typedef
2094struct SVGA3dCmdDestroyGBSurface {
2095 uint32 sid;
2096} __packed
2097SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
2098
2099/*
2100 * Bind a guest-backed surface to an object.
2101 */
2102
2103typedef
2104struct SVGA3dCmdBindGBSurface {
2105 uint32 sid;
2106 SVGAMobId mobid;
2107} __packed
2108SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
2109
2110/*
2111 * Conditionally bind a mob to a guest-backed surface if testMobid
2112 * matches the currently bound mob. Optionally issue a readback on
2113 * the surface while it is still bound to the old mobid if the mobid
2114 * is changed by this command.
2115 */
2116
2117#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
2118
2119typedef
2120struct{
2121 uint32 sid;
2122 SVGAMobId testMobid;
2123 SVGAMobId mobid;
2124 uint32 flags;
2125} __packed
2126SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
2127
2128/*
2129 * Update an image in a guest-backed surface.
2130 * (Inform the device that the guest contents have been updated.)
2131 */
2132
2133typedef
2134struct SVGA3dCmdUpdateGBImage {
2135 SVGA3dSurfaceImageId image;
2136 SVGA3dBox box;
2137} __packed
2138SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
2139
2140/*
2141 * Update an entire guest-backed surface.
2142 * (Inform the device that the guest contents have been updated.)
2143 */
2144
2145typedef
2146struct SVGA3dCmdUpdateGBSurface {
2147 uint32 sid;
2148} __packed
2149SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
2150
2151/*
2152 * Readback an image in a guest-backed surface.
2153 * (Request the device to flush the dirty contents into the guest.)
2154 */
2155
2156typedef
2157struct SVGA3dCmdReadbackGBImage {
2158 SVGA3dSurfaceImageId image;
2159} __packed
2160SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
2161
2162/*
2163 * Readback an entire guest-backed surface.
2164 * (Request the device to flush the dirty contents into the guest.)
2165 */
2166
2167typedef
2168struct SVGA3dCmdReadbackGBSurface {
2169 uint32 sid;
2170} __packed
2171SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
2172
2173/*
2174 * Readback a sub rect of an image in a guest-backed surface. After
2175 * issuing this command the driver is required to issue an update call
2176 * of the same region before issuing any other commands that reference
2177 * this surface, or rendering is not guaranteed.
2178 */
2179
2180typedef
2181struct SVGA3dCmdReadbackGBImagePartial {
2182 SVGA3dSurfaceImageId image;
2183 SVGA3dBox box;
2184 uint32 invertBox;
2185} __packed
2186SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
2187
2188/*
2189 * Invalidate an image in a guest-backed surface.
2190 * (Notify the device that the contents can be lost.)
2191 */
2192
2193typedef
2194struct SVGA3dCmdInvalidateGBImage {
2195 SVGA3dSurfaceImageId image;
2196} __packed
2197SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
2198
2199/*
2200 * Invalidate an entire guest-backed surface.
2201 * (Notify the device that the contents of all images can be lost.)
2202 */
2203
2204typedef
2205struct SVGA3dCmdInvalidateGBSurface {
2206 uint32 sid;
2207} __packed
2208SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
2209
2210/*
2211 * Invalidate a sub rect of an image in a guest-backed surface. After
2212 * issuing this command the driver is required to issue an update call
2213 * of the same region before issuing any other commands that reference
2214 * this surface, or rendering is not guaranteed.
2215 */
2216
2217typedef
2218struct SVGA3dCmdInvalidateGBImagePartial {
2219 SVGA3dSurfaceImageId image;
2220 SVGA3dBox box;
2221 uint32 invertBox;
2222} __packed
2223SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
2224
2225/*
2226 * Define a guest-backed context.
2227 */
2228
2229typedef
2230struct SVGA3dCmdDefineGBContext {
2231 uint32 cid;
2232} __packed
2233SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
2234
2235/*
2236 * Destroy a guest-backed context.
2237 */
2238
2239typedef
2240struct SVGA3dCmdDestroyGBContext {
2241 uint32 cid;
2242} __packed
2243SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
2244
2245/*
2246 * Bind a guest-backed context.
2247 *
2248 * validContents should be set to 0 for new contexts,
2249 * and 1 if this is an old context which is getting paged
2250 * back on to the device.
2251 *
2252 * For new contexts, it is recommended that the driver
2253 * issue commands to initialize all interesting state
2254 * prior to rendering.
2255 */
2256
2257typedef
2258struct SVGA3dCmdBindGBContext {
2259 uint32 cid;
2260 SVGAMobId mobid;
2261 uint32 validContents;
2262} __packed
2263SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
2264
2265/*
2266 * Readback a guest-backed context.
2267 * (Request that the device flush the contents back into guest memory.)
2268 */
2269
2270typedef
2271struct SVGA3dCmdReadbackGBContext {
2272 uint32 cid;
2273} __packed
2274SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
2275
2276/*
2277 * Invalidate a guest-backed context.
2278 */
2279typedef
2280struct SVGA3dCmdInvalidateGBContext {
2281 uint32 cid;
2282} __packed
2283SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
2284
2285/*
2286 * Define a guest-backed shader.
2287 */
2288
2289typedef
2290struct SVGA3dCmdDefineGBShader {
2291 uint32 shid;
2292 SVGA3dShaderType type;
2293 uint32 sizeInBytes;
2294} __packed
2295SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
2296
2297/*
2298 * Bind a guest-backed shader.
2299 */
2300
2301typedef struct SVGA3dCmdBindGBShader {
2302 uint32 shid;
2303 SVGAMobId mobid;
2304 uint32 offsetInBytes;
2305} __packed
2306SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
2307
2308/*
2309 * Destroy a guest-backed shader.
2310 */
2311
2312typedef struct SVGA3dCmdDestroyGBShader {
2313 uint32 shid;
2314} __packed
2315SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
2316
2317typedef
2318struct {
2319 uint32 cid;
2320 uint32 regStart;
2321 SVGA3dShaderType shaderType;
2322 SVGA3dShaderConstType constType;
2323
2324 /*
2325 * Followed by a variable number of shader constants.
2326 *
2327 * Note that FLOAT and INT constants are 4 dwords in length, while
2328 * BOOL constants are 1 dword in length.
2329 */
2330} __packed
2331SVGA3dCmdSetGBShaderConstInline;
2332/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
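
/*
 * Illustrative sketch: sizing the variable-length constant payload that
 * follows SVGA3dCmdSetGBShaderConstInline. Per the note above, FLOAT and
 * INT constants occupy four dwords each while BOOL constants occupy one;
 * SVGA3D_CONST_TYPE_BOOL is assumed to be the BOOL member of the
 * SVGA3dShaderConstType enum.
 */
static inline uint32
example_gb_shader_const_dwords(SVGA3dShaderConstType const_type,
			       uint32 num_consts)
{
	return (const_type == SVGA3D_CONST_TYPE_BOOL) ?
		num_consts : num_consts * 4;
}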
2333
2334typedef
2335struct {
2336 uint32 cid;
2337 SVGA3dQueryType type;
2338} __packed
2339SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
2340
2341typedef
2342struct {
2343 uint32 cid;
2344 SVGA3dQueryType type;
2345 SVGAMobId mobid;
2346 uint32 offset;
2347} __packed
2348SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
2349
2350
2351/*
2352 * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
2353 *
2354 * The semantics of this command are identical to those of
2355 * SVGA_3D_CMD_WAIT_FOR_QUERY, except that the results are written
2356 * to a MOB instead of a GMR.
2357 */
2358
2359typedef
2360struct {
2361 uint32 cid;
2362 SVGA3dQueryType type;
2363 SVGAMobId mobid;
2364 uint32 offset;
2365} __packed
2366SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
2367
2368typedef
2369struct {
2370 SVGAMobId mobid;
2371 uint32 fbOffset;
2372 uint32 initalized;
2373} __packed
2374SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
2375
2376typedef
2377struct {
2378 SVGAMobId mobid;
2379 uint32 gartOffset;
2380} __packed
2381SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
2382
2383
2384typedef
2385struct {
2386 uint32 gartOffset;
2387 uint32 numPages;
2388} __packed
2389SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
2390
2391
2392/*
2393 * Screen Targets
2394 */
2395#define SVGA_STFLAG_PRIMARY (1 << 0)
2396
2397typedef
2398struct {
2399 uint32 stid;
2400 uint32 width;
2401 uint32 height;
2402 int32 xRoot;
2403 int32 yRoot;
2404 uint32 flags;
2405} __packed
2406SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
2407
2408typedef
2409struct {
2410 uint32 stid;
2411} __packed
2412SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
2413
2414typedef
2415struct {
2416 uint32 stid;
2417 SVGA3dSurfaceImageId image;
2418} __packed
2419SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
2420
2421typedef
2422struct {
2423 uint32 stid;
2424 SVGA3dBox box;
2425} __packed
2426SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
2427
2428/*
2429 * Capability query index.
2430 *
2431 * Notes:
2432 *
2433 * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
2434 * fixed-function texture units available. Each of these units
2435 * works in both FFP and Shader modes, and supports texture
2436 * transforms and texture coordinates. The host may have additional
2437 * texture image units that are only usable with shaders.
2438 *
2439 * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
2440 * return TRUE. Even on physical hardware that does not support
2441 * these formats natively, the SVGA3D device will provide an emulation
2442 * which should be invisible to the guest OS.
2443 *
2444 * In general, the SVGA3D device should support any operation on
2445 * any surface format; it just may perform some of these
2446 * operations in software depending on the capabilities of the
2447 * available physical hardware.
2448 *
2449 * XXX: In the future, we will add capabilities that describe in
2450 * detail what formats are supported in hardware for what kinds
2451 * of operations.
2452 */
2453
2454typedef enum {
2455 SVGA3D_DEVCAP_3D = 0,
2456 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
2457 SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
2458 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
2459 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
2460 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
2461 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
2462 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
2463 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
2464 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
2465 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
2466 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
2467 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
2468 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
2469 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
2470 SVGA3D_DEVCAP_QUERY_TYPES = 15,
2471 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
2472 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
2473 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
2474 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
2475 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
2476 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
2477 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
2478 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
2479 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
2480 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
2481 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
2482 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
2483 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
2484 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
2485 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
2486 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
2487 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
2488 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
2489 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
2490 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
2491 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
2492 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
2493 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
2494 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
2495 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
2496 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
2497 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
2498 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
2499 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
2500 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
2501 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
2502 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
2503 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
2504 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
2505 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
2506 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
2507 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
2508 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
2509 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
2510 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
2511 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
2512 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
2513 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
2514 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
2515 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
2516 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
2517 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
2518
2519 /*
2520 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
2521 * render targets. This does not include the depth or stencil targets.
2522 */
2523 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
2524
2525 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
2526 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
2527 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
2528 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
2529 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
2530 SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
2531 SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
2532 SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
2533 SVGA3D_DEVCAP_SUPERSAMPLE = 73,
2534 SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
2535 SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
2536 SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
2537
2538 /*
2539 * This is the maximum number of SVGA context IDs that the guest
2540 * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
2541 */
2542 SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
2543
2544 /*
2545 * This is the maximum number of SVGA surface IDs that the guest
2546 * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
2547 */
2548 SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
2549
2550 SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
2551 SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
2552 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
2553
2554 SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
2555 SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
2556
2557 /*
2558 * Deprecated.
2559 */
2560 SVGA3D_DEVCAP_VGPU10 = 84,
2561
2562 /*
2563 * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
2564 * ORed together, one for every type of video decoding supported.
2565 */
2566 SVGA3D_DEVCAP_VIDEO_DECODE = 85,
2567
2568 /*
2569 * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
2570 * ORed together, one for every type of video processing supported.
2571 */
2572 SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
2573
2574 SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
2575 SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
2576 SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
2577 SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
2578
2579 SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
2580
2581 /*
2582 * Does the host support the SVGA logic ops commands?
2583 */
2584 SVGA3D_DEVCAP_LOGICOPS = 92,
2585
2586 /*
2587 * What support does the host have for screen targets?
2588 *
2589 * See the SVGA3D_SCREENTARGET_CAP bits below.
2590 */
2591 SVGA3D_DEVCAP_SCREENTARGETS = 93,
2592
2593 SVGA3D_DEVCAP_MAX /* This must be the last index. */
2594} SVGA3dDevCapIndex;
2595
2596typedef union {
2597 Bool b;
2598 uint32 u;
2599 int32 i;
2600 float f;
2601} SVGA3dDevCapResult;
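
/*
 * Illustrative sketch: choosing the right SVGA3dDevCapResult member for a
 * given index. Only the two line-width caps above are annotated as floats;
 * the LINE_AA and LINE_STIPPLE caps are booleans, and most other indices
 * are plain uint32 values. example_devcap_is_float() is a hypothetical
 * helper, not part of the device interface.
 */
static inline bool
example_devcap_is_float(SVGA3dDevCapIndex idx)
{
	return idx == SVGA3D_DEVCAP_MAX_LINE_WIDTH ||
	       idx == SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH;
}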
2602
2603typedef enum {
2604 SVGA3DCAPS_RECORD_UNKNOWN = 0,
2605 SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
2606 SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
2607 SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
2608} SVGA3dCapsRecordType;
2609
2610typedef
2611struct SVGA3dCapsRecordHeader {
2612 uint32 length;
2613 SVGA3dCapsRecordType type;
2614}
2615SVGA3dCapsRecordHeader;
2616
2617typedef
2618struct SVGA3dCapsRecord {
2619 SVGA3dCapsRecordHeader header;
2620 uint32 data[1];
2621}
2622SVGA3dCapsRecord;
2623
2624
2625typedef uint32 SVGA3dCapPair[2];
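
/*
 * A hedged sketch of walking the caps record list. This assumes, beyond
 * what the declarations above state, that header.length counts 32-bit
 * words including the header itself, and that a record of type
 * SVGA3DCAPS_RECORD_UNKNOWN terminates the list.
 */
static inline const SVGA3dCapsRecord *
example_caps_next_record(const SVGA3dCapsRecord *rec)
{
	return (const SVGA3dCapsRecord *)
		((const uint32 *)rec + rec->header.length);
}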
2626
2627#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef3385096145..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
42#define surf_size_struct SVGA3dSize
43#define u32 uint32
44#define u64 uint64_t
45#define U32_MAX ((u32)~0U)
46
47#endif /* __KERNEL__ */
48
49#include "svga3d_reg.h"
50
51/*
52 * enum svga3d_block_desc describes the active data channels in a block.
53 *
54 * There can be at most four active channels in a block:
55 * 1. Red, bump W, luminance and depth are stored in the first channel.
56 * 2. Green, bump V and stencil are stored in the second channel.
57 * 3. Blue and bump U are stored in the third channel.
58 * 4. Alpha and bump Q are stored in the fourth channel.
59 *
60 * Block channels can be used to store compressed and buffer data:
61 * 1. For compressed formats, only the data channel is used and its size
62 * is equal to that of a singular block in the compression scheme.
63 * 2. For buffer formats, only the data channel is used and its size is
64 * exactly one byte in length.
65 * 3. In each case the bit depth represents the size of a singular block.
66 *
67 * Note: Compressed and IEEE formats do not use the bitMask structure.
68 */
69
70enum svga3d_block_desc {
71 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
72   SVGA3DBLOCKDESC_BLUE      = 1 << 0, /* Block with blue channel
73                                           data */
74 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
75 data */
76 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
77 U and V */
78 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
79 data */
80 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
81 data */
82 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
83 channel */
84   SVGA3DBLOCKDESC_RED       = 1 << 2, /* Block with red channel
85                                           data */
86 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
87 data */
88 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
89 data */
90 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
91 data */
92 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
93 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
94 channel */
95 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
96 data */
97 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
98 data */
99 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
100 data depending on the
101 compression method used */
102 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
103 floating point
104 representation in
105 all channels */
106 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
107 data. */
108 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
109 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
110 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
111 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
112 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
113 e.g., NV12. */
114 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
115 Y, U, V, e.g., YV12. */
116
117 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
118 SVGA3DBLOCKDESC_GREEN,
119 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
120 SVGA3DBLOCKDESC_BLUE,
121 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
122 SVGA3DBLOCKDESC_SRGB,
123 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
124 SVGA3DBLOCKDESC_ALPHA,
125 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
126 SVGA3DBLOCKDESC_SRGB,
127 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
128 SVGA3DBLOCKDESC_V,
129 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
130 SVGA3DBLOCKDESC_LUMINANCE,
131 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
132 SVGA3DBLOCKDESC_W,
133 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
134 SVGA3DBLOCKDESC_ALPHA,
135 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
136 SVGA3DBLOCKDESC_V |
137 SVGA3DBLOCKDESC_W |
138 SVGA3DBLOCKDESC_Q,
139 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
140 SVGA3DBLOCKDESC_ALPHA,
141 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
142 SVGA3DBLOCKDESC_IEEE_FP,
143 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
144 SVGA3DBLOCKDESC_GREEN,
145 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
146 SVGA3DBLOCKDESC_BLUE,
147 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
148 SVGA3DBLOCKDESC_ALPHA,
149 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
150 SVGA3DBLOCKDESC_STENCIL,
151 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
152 SVGA3DBLOCKDESC_Y,
153 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
154 SVGA3DBLOCKDESC_Y |
155 SVGA3DBLOCKDESC_U_VIDEO |
156 SVGA3DBLOCKDESC_V_VIDEO,
157 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
158 SVGA3DBLOCKDESC_EXP,
159 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
160 SVGA3DBLOCKDESC_SRGB,
161 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
162 SVGA3DBLOCKDESC_2PLANAR_YUV,
163 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
164 SVGA3DBLOCKDESC_3PLANAR_YUV,
165};
166
167/*
168 * SVGA3dSurfaceDesc describes the actual pixel data.
169 *
170 * This structure provides the following information:
171 * 1. Block description.
172 * 2. Dimensions of a block in the surface.
173 * 3. Size of block in bytes.
174 * 4. Bit depth of the pixel data.
175 * 5. Channel bit depths and masks (if applicable).
176 */
177#define SVGA3D_CHANNEL_DEF(type) \
178 struct { \
179 union { \
180 type blue; \
181 type u; \
182 type uv_video; \
183 type u_video; \
184 }; \
185 union { \
186 type green; \
187 type v; \
188 type stencil; \
189 type v_video; \
190 }; \
191 union { \
192 type red; \
193 type w; \
194 type luminance; \
195 type y; \
196 type depth; \
197 type data; \
198 }; \
199 union { \
200 type alpha; \
201 type q; \
202 type exp; \
203 }; \
204 }
205
206struct svga3d_surface_desc {
207 enum svga3d_block_desc block_desc;
208 surf_size_struct block_size;
209 u32 bytes_per_block;
210 u32 pitch_bytes_per_block;
211
212 struct {
213 u32 total;
214 SVGA3D_CHANNEL_DEF(uint8);
215 } bit_depth;
216
217 struct {
218 SVGA3D_CHANNEL_DEF(uint8);
219 } bit_offset;
220};
221
222static const struct svga3d_surface_desc svga3d_surface_descs[] = {
223 {SVGA3DBLOCKDESC_NONE,
224 {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
225 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
226
227 {SVGA3DBLOCKDESC_RGB,
228 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
229 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
230
231 {SVGA3DBLOCKDESC_RGBA,
232 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
233 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
234
235 {SVGA3DBLOCKDESC_RGB,
236 {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
237 {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
238
239 {SVGA3DBLOCKDESC_RGB,
240 {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
241 {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
242
243 {SVGA3DBLOCKDESC_RGBA,
244 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
245 {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
246
247 {SVGA3DBLOCKDESC_RGBA,
248 {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
249 {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
250
251 {SVGA3DBLOCKDESC_DEPTH,
252 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
253 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
254
255 {SVGA3DBLOCKDESC_DEPTH,
256 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
257 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
258
259 {SVGA3DBLOCKDESC_DS,
260 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
261 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
262
263 {SVGA3DBLOCKDESC_DS,
264 {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
265 {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
266
267 {SVGA3DBLOCKDESC_LUMINANCE,
268 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
269 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
270
271 {SVGA3DBLOCKDESC_LA,
272 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
273 {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
274
275 {SVGA3DBLOCKDESC_LUMINANCE,
276 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
277 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
278
279 {SVGA3DBLOCKDESC_LA,
280 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
281 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
282
283 {SVGA3DBLOCKDESC_COMPRESSED,
284 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
285 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
286
287 {SVGA3DBLOCKDESC_COMPRESSED,
288 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
289 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
290
291 {SVGA3DBLOCKDESC_COMPRESSED,
292 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
293 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
294
295 {SVGA3DBLOCKDESC_COMPRESSED,
296 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
297 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
298
299 {SVGA3DBLOCKDESC_COMPRESSED,
300 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
301 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
302
303 {SVGA3DBLOCKDESC_UV,
304 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
305 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
306
307 {SVGA3DBLOCKDESC_UVL,
308 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
309 {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
310
311 {SVGA3DBLOCKDESC_UVL,
312 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
313 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
314
315 {SVGA3DBLOCKDESC_UVL,
316 {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
317 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
318
319 {SVGA3DBLOCKDESC_RGBA_FP,
320 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
321 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
322
323 {SVGA3DBLOCKDESC_RGBA_FP,
324 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
325 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
326
327 {SVGA3DBLOCKDESC_RGBA,
328 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
329 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
330
331 {SVGA3DBLOCKDESC_UV,
332 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
333 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
334
335 {SVGA3DBLOCKDESC_UVWQ,
336 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
337 {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
338
339 {SVGA3DBLOCKDESC_UV,
340 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
341 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
342
343 {SVGA3DBLOCKDESC_UVL,
344 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
345 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
346
347 {SVGA3DBLOCKDESC_UVWA,
348 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
349 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
350
351 {SVGA3DBLOCKDESC_ALPHA,
352 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
353 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
354
355 {SVGA3DBLOCKDESC_R_FP,
356 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
357 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
358
359 {SVGA3DBLOCKDESC_R_FP,
360 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
361 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
362
363 {SVGA3DBLOCKDESC_RG_FP,
364 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
365 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
366
367 {SVGA3DBLOCKDESC_RG_FP,
368 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
369 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
370
371 {SVGA3DBLOCKDESC_BUFFER,
372 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
373 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
374
375 {SVGA3DBLOCKDESC_DEPTH,
376 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
377 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
378
379 {SVGA3DBLOCKDESC_UV,
380 {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
381 {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
382
383 {SVGA3DBLOCKDESC_RG,
384 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
385 {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
386
387 {SVGA3DBLOCKDESC_RGBA,
388 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
389 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
390
391 {SVGA3DBLOCKDESC_YUV,
392 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
393 {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
394
395 {SVGA3DBLOCKDESC_YUV,
396 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
397 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
398
399 {SVGA3DBLOCKDESC_NV12,
400 {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
401 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
402
403 {SVGA3DBLOCKDESC_AYUV,
404 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
405 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
406
407 {SVGA3DBLOCKDESC_RGBA,
408 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
409 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
410
411 {SVGA3DBLOCKDESC_RGBA,
412 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
413 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
414
415 {SVGA3DBLOCKDESC_UVWQ,
416 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
417 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
418
419 {SVGA3DBLOCKDESC_RGB,
420 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
421 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
422
423 {SVGA3DBLOCKDESC_RGB_FP,
424 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
425 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
426
427 {SVGA3DBLOCKDESC_RGB,
428 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
429 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
430
431 {SVGA3DBLOCKDESC_UVW,
432 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
433 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
434
435 {SVGA3DBLOCKDESC_RGBA,
436 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
437 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
438
439 {SVGA3DBLOCKDESC_RGBA,
440 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
441 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
442
443 {SVGA3DBLOCKDESC_UVWQ,
444 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
445 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
446
447 {SVGA3DBLOCKDESC_UVWQ,
448 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
449 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
450
451 {SVGA3DBLOCKDESC_RG,
452 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
453 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
454
455 {SVGA3DBLOCKDESC_RG,
456 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
457 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
458
459 {SVGA3DBLOCKDESC_UV,
460 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
461 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
462
463 {SVGA3DBLOCKDESC_RG,
464 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
465 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
466
467 {SVGA3DBLOCKDESC_DS,
468 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
469 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
470
471 {SVGA3DBLOCKDESC_R_FP,
472 {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
473 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
474
475 {SVGA3DBLOCKDESC_GREEN,
476 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
477 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
478
479 {SVGA3DBLOCKDESC_RGBA,
480 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
481 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
482
483 {SVGA3DBLOCKDESC_RGBA,
484 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
485 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
486
487 {SVGA3DBLOCKDESC_RGB_FP,
488 {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
489 {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
490
491 {SVGA3DBLOCKDESC_RGBA,
492 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
493 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
494
495 {SVGA3DBLOCKDESC_RGBA,
496 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
497 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
498
499 {SVGA3DBLOCKDESC_RGBA_SRGB,
500 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
501 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
502
503 {SVGA3DBLOCKDESC_RGBA,
504 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
505 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
506
507 {SVGA3DBLOCKDESC_RGBA,
508 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
509 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
510
511 {SVGA3DBLOCKDESC_RG,
512 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
513 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
514
515 {SVGA3DBLOCKDESC_RG_FP,
516 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
517 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
518
519 {SVGA3DBLOCKDESC_UV,
520 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
521 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
522
523 {SVGA3DBLOCKDESC_RED,
524 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
525 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
526
527 {SVGA3DBLOCKDESC_DEPTH,
528 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
529 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
530
531 {SVGA3DBLOCKDESC_RED,
532 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
533 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
534
535 {SVGA3DBLOCKDESC_RED,
536 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
537 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
538
539 {SVGA3DBLOCKDESC_RG,
540 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
541 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
542
543 {SVGA3DBLOCKDESC_DS,
544 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
545 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
546
547 {SVGA3DBLOCKDESC_RED,
548 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
549 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
550
551 {SVGA3DBLOCKDESC_GREEN,
552 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
553 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
554
555 {SVGA3DBLOCKDESC_RG,
556 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
557 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
558
559 {SVGA3DBLOCKDESC_RG,
560 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
561 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
562
563 {SVGA3DBLOCKDESC_RG,
564 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
565 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
566
567 {SVGA3DBLOCKDESC_UV,
568 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
569 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
570
571 {SVGA3DBLOCKDESC_RED,
572 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
573 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
574
575 {SVGA3DBLOCKDESC_RED,
576 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
577 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
578
579 {SVGA3DBLOCKDESC_RED,
580 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
581 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
582
583 {SVGA3DBLOCKDESC_U,
584 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
585 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
586
587 {SVGA3DBLOCKDESC_U,
588 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
589 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
590
591 {SVGA3DBLOCKDESC_RED,
592 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
593 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
594
595 {SVGA3DBLOCKDESC_RED,
596 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
597 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
598
599 {SVGA3DBLOCKDESC_RED,
600 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
601 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
602
603 {SVGA3DBLOCKDESC_U,
604 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
605 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
606
607 {SVGA3DBLOCKDESC_U,
608 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
609 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
610
611 {SVGA3DBLOCKDESC_RED,
612 {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
613 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
614
615 {SVGA3DBLOCKDESC_RGBE,
616 {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
617 {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
618
619 {SVGA3DBLOCKDESC_RG,
620 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
621 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
622
623 {SVGA3DBLOCKDESC_RG,
624 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
625 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
626
627 {SVGA3DBLOCKDESC_COMPRESSED,
628 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
629 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
630
631 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
632 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
633 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
634
635 {SVGA3DBLOCKDESC_COMPRESSED,
636 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
637 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
638
639 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
640 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
641 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
642
643 {SVGA3DBLOCKDESC_COMPRESSED,
644 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
645 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
646
647 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
648 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
649 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
650
651 {SVGA3DBLOCKDESC_COMPRESSED,
652 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
653 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
654
655 {SVGA3DBLOCKDESC_COMPRESSED,
656 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
657 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
658
659 {SVGA3DBLOCKDESC_COMPRESSED,
660 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
661 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
662
663 {SVGA3DBLOCKDESC_COMPRESSED,
664 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
665 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
666
667 {SVGA3DBLOCKDESC_COMPRESSED,
668 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
669 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
670
671 {SVGA3DBLOCKDESC_COMPRESSED,
672 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
673 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
674
675 {SVGA3DBLOCKDESC_RGBA,
676 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
677 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
678
679 {SVGA3DBLOCKDESC_RGBA,
680 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
681 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
682
683 {SVGA3DBLOCKDESC_RGBA_SRGB,
684 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
685 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
686
687 {SVGA3DBLOCKDESC_RGB,
688 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
689 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
690
691 {SVGA3DBLOCKDESC_RGB_SRGB,
692 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
693 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
694
695 {SVGA3DBLOCKDESC_DEPTH,
696 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
697 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
698
699 {SVGA3DBLOCKDESC_DS,
700 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
701 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
702
703 {SVGA3DBLOCKDESC_DS,
704 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
705 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
706};
707
708static inline u32 clamped_umul32(u32 a, u32 b)
709{
710 u64 tmp = (u64) a*b;
711 return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
712}
713
714static inline const struct svga3d_surface_desc *
715svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
716{
717 if (format < ARRAY_SIZE(svga3d_surface_descs))
718 return &svga3d_surface_descs[format];
719
720 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
721}
722
723/*
724 *----------------------------------------------------------------------
725 *
726 * svga3dsurface_get_mip_size --
727 *
728 * Given a base level size and the mip level, compute the size of
729 * the mip level.
730 *
731 * Results:
732 * See above.
733 *
734 * Side effects:
735 * None.
736 *
737 *----------------------------------------------------------------------
738 */
739
740static inline surf_size_struct
741svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
742{
743 surf_size_struct size;
744
745 size.width = max_t(u32, base_level.width >> mip_level, 1);
746 size.height = max_t(u32, base_level.height >> mip_level, 1);
747 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
748 return size;
749}
750
751static inline void
752svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
753 const surf_size_struct *pixel_size,
754 surf_size_struct *block_size)
755{
756 block_size->width = DIV_ROUND_UP(pixel_size->width,
757 desc->block_size.width);
758 block_size->height = DIV_ROUND_UP(pixel_size->height,
759 desc->block_size.height);
760 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
761 desc->block_size.depth);
762}
763
764static inline bool
765svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
766{
767 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
768}
769
770static inline u32
771svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
772 const surf_size_struct *size)
773{
774 u32 pitch;
775 surf_size_struct blocks;
776
777 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
778
779 pitch = blocks.width * desc->pitch_bytes_per_block;
780
781 return pitch;
782}
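
/*
 * Worked example (illustrative only): for SVGA3D_DXT1 the table above
 * gives a 4x4x1 block and 8 pitch bytes per block, so a 67-pixel-wide
 * image spans DIV_ROUND_UP(67, 4) = 17 blocks and has a pitch of
 * 17 * 8 = 136 bytes.
 */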
783
784/*
785 *-----------------------------------------------------------------------------
786 *
787 * svga3dsurface_get_image_buffer_size --
788 *
789 * Return the number of bytes of buffer space required to store
790 * one image of a surface, optionally using the specified pitch.
791 *
792 * If pitch is zero, it is assumed that rows are tightly packed.
793 *
794 * This function is overflow-safe. If the result would have
795 * overflowed, we return U32_MAX instead.
796 *
797 * Results:
798 * Byte count.
799 *
800 * Side effects:
801 * None.
802 *
803 *-----------------------------------------------------------------------------
804 */
805
806static inline u32
807svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
808 const surf_size_struct *size,
809 u32 pitch)
810{
811 surf_size_struct image_blocks;
812 u32 slice_size, total_size;
813
814 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
815
816 if (svga3dsurface_is_planar_surface(desc)) {
817 total_size = clamped_umul32(image_blocks.width,
818 image_blocks.height);
819 total_size = clamped_umul32(total_size, image_blocks.depth);
820 total_size = clamped_umul32(total_size, desc->bytes_per_block);
821 return total_size;
822 }
823
824 if (pitch == 0)
825 pitch = svga3dsurface_calculate_pitch(desc, size);
826
827 slice_size = clamped_umul32(image_blocks.height, pitch);
828 total_size = clamped_umul32(slice_size, image_blocks.depth);
829
830 return total_size;
831}
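
/*
 * Continuing the DXT1 example above: a 67x67x1 image is 17x17x1 blocks,
 * so with a tightly packed pitch of 136 bytes each slice occupies
 * 17 * 136 = 2312 bytes, and with depth 1 so does the whole image.
 */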
832
833static inline u32
834svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
835 surf_size_struct base_level_size,
836 u32 num_mip_levels,
837 bool cubemap)
838{
839 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
840 u64 total_size = 0;
841 u32 mip;
842
843 for (mip = 0; mip < num_mip_levels; mip++) {
844 surf_size_struct size =
845 svga3dsurface_get_mip_size(base_level_size, mip);
846 total_size += svga3dsurface_get_image_buffer_size(desc,
847 &size, 0);
848 }
849
850 if (cubemap)
851 total_size *= SVGA3D_MAX_SURFACE_FACES;
852
853 return (u32) min_t(u64, total_size, (u64) U32_MAX);
854}
855
856
857/**
858 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
859 * in an image (or volume).
860 *
861 * @width: The image width in pixels.
862 * @height: The image height in pixels.
 * @format: The surface format.
 * @x: X coordinate of the pixel.
 * @y: Y coordinate of the pixel.
 * @z: Z slice of the pixel (for volumes).
863 */
864static inline u32
865svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
866 u32 width, u32 height,
867 u32 x, u32 y, u32 z)
868{
869 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
870 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
871 const u32 bd = desc->block_size.depth;
872 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
873 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
874 const u32 offset = (z / bd * imgstride +
875 y / bh * rowstride +
876 x / bw * desc->bytes_per_block);
877 return offset;
878}
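
/*
 * Worked example (illustrative only): for SVGA3D_A8R8G8B8 (1x1 blocks,
 * 4 bytes per block) in a 640x480 image, rowstride = 640 * 4 = 2560
 * bytes, so pixel (x = 3, y = 2, z = 0) lives at byte offset
 * 2 * 2560 + 3 * 4 = 5132.
 */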
879
880
881static inline u32
882svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
883 surf_size_struct baseLevelSize,
884 u32 numMipLevels,
885 u32 face,
886 u32 mip)
887
888{
889 u32 offset;
890 u32 mipChainBytes;
891 u32 mipChainBytesToLevel;
892 u32 i;
893 const struct svga3d_surface_desc *desc;
894 surf_size_struct mipSize;
895 u32 bytes;
896
897 desc = svga3dsurface_get_desc(format);
898
899 mipChainBytes = 0;
900 mipChainBytesToLevel = 0;
901 for (i = 0; i < numMipLevels; i++) {
902 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
903 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
904 mipChainBytes += bytes;
905 if (i < mip)
906 mipChainBytesToLevel += bytes;
907 }
908
909 offset = mipChainBytes * face + mipChainBytesToLevel;
910
911 return offset;
912}
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836dedcfc2..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * Silly typedefs for the svga headers. Currently the headers are shared
30 * between all components that talk to svga, and as such they
31 * are in a completely different style and use weird defines.
32 *
33 * This file lets all the ugly be prefixed with svga*.
34 */
35
36#ifndef _SVGA_TYPES_H_
37#define _SVGA_TYPES_H_
38
39typedef uint16_t uint16;
40typedef uint32_t uint32;
41typedef uint8_t uint8;
42typedef int32_t int32;
43typedef bool Bool;
44
45#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * This file implements the vmwgfx context binding manager.
29 * The sole reason for having to use this code is that VMware guest-
30 * backed contexts can be swapped out to their backing mobs by the device
31 * at any time, and swapped back in at any time. At swapin time, the device
32 * validates the context bindings to make sure they point to valid resources.
33 * It's this outside-of-drawcall validation (which can happen at any time)
34 * that makes this code necessary.
35 *
36 * We therefore need to kill any context bindings pointing to a resource
37 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
38 * swapped out the context, we can't swap it in again to kill bindings,
39 * because of backing mob reservation lockdep violations. So, as part of
40 * context swapout, we also kill all bindings of a context, so that they are
41 * already killed if a resource to which a binding points
42 * needs to be swapped out.
43 *
44 * Note that a resource can be pointed to by bindings from multiple contexts.
45 * Therefore we can't easily protect this data with a per-context mutex
46 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
47 * to protect all binding manager data.
48 *
49 * Finally, any association between a context and a global resource
50 * (surface, shader or even DX query) is conceptually a context binding that
51 * needs to be tracked by this code.
52 */
53
54#include "vmwgfx_drv.h"
55#include "vmwgfx_binding.h"
56#include "device_include/svga3d_reg.h"
57
58#define VMW_BINDING_RT_BIT 0
59#define VMW_BINDING_PS_BIT 1
60#define VMW_BINDING_SO_BIT 2
61#define VMW_BINDING_VB_BIT 3
62#define VMW_BINDING_NUM_BITS 4
63
64#define VMW_BINDING_PS_SR_BIT 0
65
66/**
67 * struct vmw_ctx_binding_state - per context binding state
68 *
69 * @dev_priv: Pointer to device private structure.
70 * @list: linked list of individual active bindings.
71 * @render_targets: Render target bindings.
72 * @texture_units: Texture units bindings.
73 * @ds_view: Depth-stencil view binding.
74 * @so_targets: StreamOutput target bindings.
75 * @vertex_buffers: Vertex buffer bindings.
76 * @index_buffer: Index buffer binding.
77 * @per_shader: Per shader-type bindings.
78 * @dirty: Bitmap tracking per binding-type changes that have not yet
79 * been emitted to the device.
80 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
81 * have not yet been emitted to the device.
82 * @bind_cmd_buffer: Scratch space used to construct binding commands.
83 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
84 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
85 * device binding slot of the first command data entry in @bind_cmd_buffer.
86 *
87 * Note that this structure also provides storage space for the individual
88 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
89 * for individual bindings.
90 *
91 */
92struct vmw_ctx_binding_state {
93 struct vmw_private *dev_priv;
94 struct list_head list;
95 struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
96 struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
97 struct vmw_ctx_bindinfo_view ds_view;
98 struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
99 struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
100 struct vmw_ctx_bindinfo_ib index_buffer;
101 struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
102
103 unsigned long dirty;
104 DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
105
106 u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
107 u32 bind_cmd_count;
108 u32 bind_first_slot;
109};
110
111static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
112static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
113 bool rebind);
114static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
115static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
116static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
117static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
118static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
119static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
120static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
121 bool rebind);
122static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
123static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
124static void vmw_binding_build_asserts(void) __attribute__ ((unused));
125
126typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
127
128/**
129 * struct vmw_binding_info - Per binding type information for the binding
130 * manager
131 *
132 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
133 * @offsets: array[shader_slot] of offsets to the array[slot]
134 * of struct bindings for the binding type.
135 * @scrub_func: Pointer to the scrub function for this binding type.
136 *
137 * Holds static information to help optimize the binding manager and avoid
138 * an excessive amount of switch statements.
139 */
140struct vmw_binding_info {
141 size_t size;
142 const size_t *offsets;
143 vmw_scrub_func scrub_func;
144};
145
146/*
147 * A number of static variables that help determine the scrub func and the
148 * location of the struct vmw_ctx_bindinfo slots for each binding type.
149 */
150static const size_t vmw_binding_shader_offsets[] = {
151 offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
152 offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
153 offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
154};
155static const size_t vmw_binding_rt_offsets[] = {
156 offsetof(struct vmw_ctx_binding_state, render_targets),
157};
158static const size_t vmw_binding_tex_offsets[] = {
159 offsetof(struct vmw_ctx_binding_state, texture_units),
160};
161static const size_t vmw_binding_cb_offsets[] = {
162 offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
163 offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
164 offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
165};
166static const size_t vmw_binding_dx_ds_offsets[] = {
167 offsetof(struct vmw_ctx_binding_state, ds_view),
168};
169static const size_t vmw_binding_sr_offsets[] = {
170 offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
171 offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
172 offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
173};
174static const size_t vmw_binding_so_offsets[] = {
175 offsetof(struct vmw_ctx_binding_state, so_targets),
176};
177static const size_t vmw_binding_vb_offsets[] = {
178 offsetof(struct vmw_ctx_binding_state, vertex_buffers),
179};
180static const size_t vmw_binding_ib_offsets[] = {
181 offsetof(struct vmw_ctx_binding_state, index_buffer),
182};
183
184static const struct vmw_binding_info vmw_binding_infos[] = {
185 [vmw_ctx_binding_shader] = {
186 .size = sizeof(struct vmw_ctx_bindinfo_shader),
187 .offsets = vmw_binding_shader_offsets,
188 .scrub_func = vmw_binding_scrub_shader},
189 [vmw_ctx_binding_rt] = {
190 .size = sizeof(struct vmw_ctx_bindinfo_view),
191 .offsets = vmw_binding_rt_offsets,
192 .scrub_func = vmw_binding_scrub_render_target},
193 [vmw_ctx_binding_tex] = {
194 .size = sizeof(struct vmw_ctx_bindinfo_tex),
195 .offsets = vmw_binding_tex_offsets,
196 .scrub_func = vmw_binding_scrub_texture},
197 [vmw_ctx_binding_cb] = {
198 .size = sizeof(struct vmw_ctx_bindinfo_cb),
199 .offsets = vmw_binding_cb_offsets,
200 .scrub_func = vmw_binding_scrub_cb},
201 [vmw_ctx_binding_dx_shader] = {
202 .size = sizeof(struct vmw_ctx_bindinfo_shader),
203 .offsets = vmw_binding_shader_offsets,
204 .scrub_func = vmw_binding_scrub_dx_shader},
205 [vmw_ctx_binding_dx_rt] = {
206 .size = sizeof(struct vmw_ctx_bindinfo_view),
207 .offsets = vmw_binding_rt_offsets,
208 .scrub_func = vmw_binding_scrub_dx_rt},
209 [vmw_ctx_binding_sr] = {
210 .size = sizeof(struct vmw_ctx_bindinfo_view),
211 .offsets = vmw_binding_sr_offsets,
212 .scrub_func = vmw_binding_scrub_sr},
213 [vmw_ctx_binding_ds] = {
214 .size = sizeof(struct vmw_ctx_bindinfo_view),
215 .offsets = vmw_binding_dx_ds_offsets,
216 .scrub_func = vmw_binding_scrub_dx_rt},
217 [vmw_ctx_binding_so] = {
218 .size = sizeof(struct vmw_ctx_bindinfo_so),
219 .offsets = vmw_binding_so_offsets,
220 .scrub_func = vmw_binding_scrub_so},
221 [vmw_ctx_binding_vb] = {
222 .size = sizeof(struct vmw_ctx_bindinfo_vb),
223 .offsets = vmw_binding_vb_offsets,
224 .scrub_func = vmw_binding_scrub_vb},
225 [vmw_ctx_binding_ib] = {
226 .size = sizeof(struct vmw_ctx_bindinfo_ib),
227 .offsets = vmw_binding_ib_offsets,
228 .scrub_func = vmw_binding_scrub_ib},
229};
230
231/**
232 * vmw_cbs_context - Return a pointer to the context resource of a
233 * context binding state tracker.
234 *
235 * @cbs: The context binding state tracker.
236 *
237 * Provided there are any active bindings, this function will return an
238 * unreferenced pointer to the context resource that owns the context
239 * binding state tracker. If there are no active bindings, this function
240 * will return NULL. Note that the caller must somehow ensure that a reference
241 * is held on the context resource prior to calling this function.
242 */
243static const struct vmw_resource *
244vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
245{
246 if (list_empty(&cbs->list))
247 return NULL;
248
249 return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
250 ctx_list)->ctx;
251}
252
253/**
254 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
255 *
256 * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
257 * @bt: The binding type.
258 * @shader_slot: The shader slot of the binding. If none, then set to 0.
259 * @slot: The slot of the binding.
260 */
261static struct vmw_ctx_bindinfo *
262vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
263 enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
264{
265 const struct vmw_binding_info *b = &vmw_binding_infos[bt];
266 size_t offset = b->offsets[shader_slot] + b->size*slot;
267
268 return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
269}
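
/*
 * For example, with bt == vmw_ctx_binding_cb, shader_slot == 1 and
 * slot == 2, this resolves to
 * offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers)
 * + 2 * sizeof(struct vmw_ctx_bindinfo_cb) from the start of @cbs.
 */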
270
271/**
272 * vmw_binding_drop: Stop tracking a context binding
273 *
274 * @bi: Pointer to binding tracker storage.
275 *
276 * Stops tracking a context binding, and re-initializes its storage.
277 * Typically used when the context binding is replaced with a binding to
278 * another (or the same, for that matter) resource.
279 */
280static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
281{
282 list_del(&bi->ctx_list);
283 if (!list_empty(&bi->res_list))
284 list_del(&bi->res_list);
285 bi->ctx = NULL;
286}
287
288/**
289 * vmw_binding_add: Start tracking a context binding
290 *
291 * @cbs: Pointer to the context binding state tracker.
292 * @bi: Information about the binding to track.
293 *
294 * Starts tracking the binding in the context binding
295 * state structure @cbs.
296 */
297void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
298 const struct vmw_ctx_bindinfo *bi,
299 u32 shader_slot, u32 slot)
300{
301 struct vmw_ctx_bindinfo *loc =
302 vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
303 const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
304
305 if (loc->ctx != NULL)
306 vmw_binding_drop(loc);
307
308 memcpy(loc, bi, b->size);
309 loc->scrubbed = false;
310 list_add(&loc->ctx_list, &cbs->list);
311 INIT_LIST_HEAD(&loc->res_list);
312}
313
314/**
315 * vmw_binding_transfer: Transfer a context binding tracking entry.
316 *
317 * @cbs: Pointer to the persistent context binding state tracker.
318 * @from: Staged binding state tracker that @bi currently resides in.
319 * @bi: Information about the binding to track.
320 */
321static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
322 const struct vmw_ctx_binding_state *from,
323 const struct vmw_ctx_bindinfo *bi)
324{
325 size_t offset = (unsigned long)bi - (unsigned long)from;
326 struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
327 ((unsigned long) cbs + offset);
328
329 if (loc->ctx != NULL) {
330 WARN_ON(bi->scrubbed);
331
332 vmw_binding_drop(loc);
333 }
334
335 if (bi->res != NULL) {
336 memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
337 list_add_tail(&loc->ctx_list, &cbs->list);
338 list_add_tail(&loc->res_list, &loc->res->binding_head);
339 }
340}
341
342/**
343 * vmw_binding_state_kill - Kill all bindings associated with a
 344 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
345 *
346 * @cbs: Pointer to the context binding state tracker.
347 *
348 * Emits commands to scrub all bindings associated with the
349 * context binding state tracker. Then re-initializes the whole structure.
350 */
351void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
352{
353 struct vmw_ctx_bindinfo *entry, *next;
354
355 vmw_binding_state_scrub(cbs);
356 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
357 vmw_binding_drop(entry);
358}
359
360/**
361 * vmw_binding_state_scrub - Scrub all bindings associated with a
 362 * struct vmw_ctx_binding_state structure.
363 *
364 * @cbs: Pointer to the context binding state tracker.
365 *
366 * Emits commands to scrub all bindings associated with the
367 * context binding state tracker.
368 */
369void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
370{
371 struct vmw_ctx_bindinfo *entry;
372
373 list_for_each_entry(entry, &cbs->list, ctx_list) {
374 if (!entry->scrubbed) {
375 (void) vmw_binding_infos[entry->bt].scrub_func
376 (entry, false);
377 entry->scrubbed = true;
378 }
379 }
380
381 (void) vmw_binding_emit_dirty(cbs);
382}
383
384/**
385 * vmw_binding_res_list_kill - Kill all bindings on a
386 * resource binding list
387 *
388 * @head: list head of resource binding list
389 *
390 * Kills all bindings associated with a specific resource. Typically
391 * called before the resource is destroyed.
392 */
393void vmw_binding_res_list_kill(struct list_head *head)
394{
395 struct vmw_ctx_bindinfo *entry, *next;
396
397 vmw_binding_res_list_scrub(head);
398 list_for_each_entry_safe(entry, next, head, res_list)
399 vmw_binding_drop(entry);
400}
401
402/**
403 * vmw_binding_res_list_scrub - Scrub all bindings on a
404 * resource binding list
405 *
406 * @head: list head of resource binding list
407 *
408 * Scrub all bindings associated with a specific resource. Typically
409 * called before the resource is evicted.
410 */
411void vmw_binding_res_list_scrub(struct list_head *head)
412{
413 struct vmw_ctx_bindinfo *entry;
414
415 list_for_each_entry(entry, head, res_list) {
416 if (!entry->scrubbed) {
417 (void) vmw_binding_infos[entry->bt].scrub_func
418 (entry, false);
419 entry->scrubbed = true;
420 }
421 }
422
423 list_for_each_entry(entry, head, res_list) {
424 struct vmw_ctx_binding_state *cbs =
425 vmw_context_binding_state(entry->ctx);
426
427 (void) vmw_binding_emit_dirty(cbs);
428 }
429}
430
431
432/**
433 * vmw_binding_state_commit - Commit staged binding info
434 *
 435 * @to: Pointer to the persistent context binding state tracker to commit to.
 436 * @from: Staged binding info built during execbuf.
438 *
439 * Transfers binding info from a temporary structure
440 * (typically used by execbuf) to the persistent
441 * structure in the context. This can be done once commands have been
 442 * submitted to hardware.
443 */
444void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
445 struct vmw_ctx_binding_state *from)
446{
447 struct vmw_ctx_bindinfo *entry, *next;
448
449 list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
450 vmw_binding_transfer(to, from, entry);
451 vmw_binding_drop(entry);
452 }
453}
454
455/**
456 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
457 *
 458 * @cbs: Pointer to the context binding state tracker.
459 *
460 * Walks through the context binding list and rebinds all scrubbed
461 * resources.
462 */
463int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
464{
465 struct vmw_ctx_bindinfo *entry;
466 int ret;
467
468 list_for_each_entry(entry, &cbs->list, ctx_list) {
469 if (likely(!entry->scrubbed))
470 continue;
471
 472 		if (entry->res == NULL || entry->res->id ==
 473 		    SVGA3D_INVALID_ID)
474 continue;
475
476 ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
477 if (unlikely(ret != 0))
478 return ret;
479
480 entry->scrubbed = false;
481 }
482
483 return vmw_binding_emit_dirty(cbs);
484}
485
486/**
487 * vmw_binding_scrub_shader - scrub a shader binding from a context.
488 *
489 * @bi: single binding information.
490 * @rebind: Whether to issue a bind instead of scrub command.
491 */
492static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
493{
494 struct vmw_ctx_bindinfo_shader *binding =
495 container_of(bi, typeof(*binding), bi);
496 struct vmw_private *dev_priv = bi->ctx->dev_priv;
497 struct {
498 SVGA3dCmdHeader header;
499 SVGA3dCmdSetShader body;
500 } *cmd;
501
502 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
503 if (unlikely(cmd == NULL)) {
504 DRM_ERROR("Failed reserving FIFO space for shader "
505 "unbinding.\n");
506 return -ENOMEM;
507 }
508
509 cmd->header.id = SVGA_3D_CMD_SET_SHADER;
510 cmd->header.size = sizeof(cmd->body);
511 cmd->body.cid = bi->ctx->id;
512 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
513 cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
514 vmw_fifo_commit(dev_priv, sizeof(*cmd));
515
516 return 0;
517}
518
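/*
 * Editor's sketch (not part of this patch): the scrub functions above and
 * below all follow one shape: declare an anonymous header+body packet, so
 * sizeof(*cmd) is exactly what gets reserved on and committed to the FIFO.
 * A freestanding model with invented command id and field values:
 */
#include <stdint.h>
#include <stdio.h>

struct demo_hdr { uint32_t id; uint32_t size; };
struct demo_body { uint32_t cid; uint32_t type; uint32_t shid; };

int main(void)
{
	struct {
		struct demo_hdr header;
		struct demo_body body;
	} cmd = {
		.header = { .id = 1040, .size = sizeof(struct demo_body) },
		/* scrub: point the slot at an invalid id, in the spirit of
		 * SVGA3D_INVALID_ID above */
		.body = { .cid = 7, .type = 0, .shid = 0xFFFFFFFFu },
	};

	printf("reserve/commit %zu bytes, body %u bytes\n",
	       sizeof(cmd), (unsigned int)cmd.header.size);
	return 0;
}
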
519/**
520 * vmw_binding_scrub_render_target - scrub a render target binding
521 * from a context.
522 *
523 * @bi: single binding information.
524 * @rebind: Whether to issue a bind instead of scrub command.
525 */
526static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
527 bool rebind)
528{
529 struct vmw_ctx_bindinfo_view *binding =
530 container_of(bi, typeof(*binding), bi);
531 struct vmw_private *dev_priv = bi->ctx->dev_priv;
532 struct {
533 SVGA3dCmdHeader header;
534 SVGA3dCmdSetRenderTarget body;
535 } *cmd;
536
537 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
538 if (unlikely(cmd == NULL)) {
539 DRM_ERROR("Failed reserving FIFO space for render target "
540 "unbinding.\n");
541 return -ENOMEM;
542 }
543
544 cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
545 cmd->header.size = sizeof(cmd->body);
546 cmd->body.cid = bi->ctx->id;
547 cmd->body.type = binding->slot;
548 cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
549 cmd->body.target.face = 0;
550 cmd->body.target.mipmap = 0;
551 vmw_fifo_commit(dev_priv, sizeof(*cmd));
552
553 return 0;
554}
555
556/**
557 * vmw_binding_scrub_texture - scrub a texture binding from a context.
558 *
559 * @bi: single binding information.
560 * @rebind: Whether to issue a bind instead of scrub command.
561 *
562 * TODO: Possibly complement this function with a function that takes
563 * a list of texture bindings and combines them to a single command.
564 */
565static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
566 bool rebind)
567{
568 struct vmw_ctx_bindinfo_tex *binding =
569 container_of(bi, typeof(*binding), bi);
570 struct vmw_private *dev_priv = bi->ctx->dev_priv;
571 struct {
572 SVGA3dCmdHeader header;
573 struct {
574 SVGA3dCmdSetTextureState c;
575 SVGA3dTextureState s1;
576 } body;
577 } *cmd;
578
579 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
580 if (unlikely(cmd == NULL)) {
581 DRM_ERROR("Failed reserving FIFO space for texture "
582 "unbinding.\n");
583 return -ENOMEM;
584 }
585
586 cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
587 cmd->header.size = sizeof(cmd->body);
588 cmd->body.c.cid = bi->ctx->id;
589 cmd->body.s1.stage = binding->texture_stage;
590 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
591 cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
592 vmw_fifo_commit(dev_priv, sizeof(*cmd));
593
594 return 0;
595}
596
597/**
598 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
599 *
600 * @bi: single binding information.
601 * @rebind: Whether to issue a bind instead of scrub command.
602 */
603static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
604{
605 struct vmw_ctx_bindinfo_shader *binding =
606 container_of(bi, typeof(*binding), bi);
607 struct vmw_private *dev_priv = bi->ctx->dev_priv;
608 struct {
609 SVGA3dCmdHeader header;
610 SVGA3dCmdDXSetShader body;
611 } *cmd;
612
613 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
614 if (unlikely(cmd == NULL)) {
615 DRM_ERROR("Failed reserving FIFO space for DX shader "
616 "unbinding.\n");
617 return -ENOMEM;
618 }
619 cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
620 cmd->header.size = sizeof(cmd->body);
621 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
622 cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
623 vmw_fifo_commit(dev_priv, sizeof(*cmd));
624
625 return 0;
626}
627
628/**
629 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
630 *
631 * @bi: single binding information.
632 * @rebind: Whether to issue a bind instead of scrub command.
633 */
634static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
635{
636 struct vmw_ctx_bindinfo_cb *binding =
637 container_of(bi, typeof(*binding), bi);
638 struct vmw_private *dev_priv = bi->ctx->dev_priv;
639 struct {
640 SVGA3dCmdHeader header;
641 SVGA3dCmdDXSetSingleConstantBuffer body;
642 } *cmd;
643
644 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
645 if (unlikely(cmd == NULL)) {
646 DRM_ERROR("Failed reserving FIFO space for DX shader "
647 "unbinding.\n");
648 return -ENOMEM;
649 }
650
651 cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
652 cmd->header.size = sizeof(cmd->body);
653 cmd->body.slot = binding->slot;
654 cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
655 if (rebind) {
656 cmd->body.offsetInBytes = binding->offset;
657 cmd->body.sizeInBytes = binding->size;
658 cmd->body.sid = bi->res->id;
659 } else {
660 cmd->body.offsetInBytes = 0;
661 cmd->body.sizeInBytes = 0;
662 cmd->body.sid = SVGA3D_INVALID_ID;
663 }
664 vmw_fifo_commit(dev_priv, sizeof(*cmd));
665
666 return 0;
667}
668
669/**
670 * vmw_collect_view_ids - Build view id data for a view binding command
671 * without checking which bindings actually need to be emitted
672 *
673 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
674 * @bi: Pointer to where the binding info array is stored in @cbs
675 * @max_num: Maximum number of entries in the @bi array.
676 *
677 * Scans the @bi array for bindings and builds a buffer of view id data.
678 * Stops at the first non-existing binding in the @bi array.
679 * On output, @cbs->bind_cmd_count contains the number of bindings to be
680 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
681 * contains the command data.
682 */
683static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
684 const struct vmw_ctx_bindinfo *bi,
685 u32 max_num)
686{
687 const struct vmw_ctx_bindinfo_view *biv =
688 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
689 unsigned long i;
690
691 cbs->bind_cmd_count = 0;
692 cbs->bind_first_slot = 0;
693
694 for (i = 0; i < max_num; ++i, ++biv) {
695 if (!biv->bi.ctx)
696 break;
697
698 cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
699 ((biv->bi.scrubbed) ?
700 SVGA3D_INVALID_ID : biv->bi.res->id);
701 }
702}
703
704/**
705 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
706 *
707 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
708 * @bi: Pointer to where the binding info array is stored in @cbs
709 * @dirty: Bitmap indicating which bindings need to be emitted.
710 * @max_num: Maximum number of entries in the @bi array.
711 *
712 * Scans the @bi array for bindings that need to be emitted and
713 * builds a buffer of view id data.
714 * On output, @cbs->bind_cmd_count contains the number of bindings to be
715 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
716 * binding, and @cbs->bind_cmd_buffer contains the command data.
717 */
718static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
719 const struct vmw_ctx_bindinfo *bi,
720 unsigned long *dirty,
721 u32 max_num)
722{
723 const struct vmw_ctx_bindinfo_view *biv =
724 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
725 unsigned long i, next_bit;
726
727 cbs->bind_cmd_count = 0;
728 i = find_first_bit(dirty, max_num);
729 next_bit = i;
730 cbs->bind_first_slot = i;
731
732 biv += i;
733 for (; i < max_num; ++i, ++biv) {
734 cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
735 ((!biv->bi.ctx || biv->bi.scrubbed) ?
736 SVGA3D_INVALID_ID : biv->bi.res->id);
737
738 if (next_bit == i) {
739 next_bit = find_next_bit(dirty, max_num, i + 1);
740 if (next_bit >= max_num)
741 break;
742 }
743 }
744}
745
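/*
 * Editor's model (not part of this patch) of the collection loop above:
 * a single contiguous range is emitted, from the first dirty bit through
 * the last dirty bit, re-sending any clean slots in between rather than
 * splitting into several commands. Freestanding version:
 */
#include <stdio.h>

#define DEMO_MAX_SLOTS 16

static unsigned int demo_next_bit(unsigned long map, unsigned int from)
{
	unsigned int i;

	for (i = from; i < DEMO_MAX_SLOTS; i++)
		if (map & (1UL << i))
			return i;
	return DEMO_MAX_SLOTS;	/* like find_next_bit() past the end */
}

int main(void)
{
	unsigned long dirty = (1UL << 3) | (1UL << 4) | (1UL << 9);
	unsigned int first = demo_next_bit(dirty, 0);
	unsigned int i = first, next = first, count = 0;

	for (; i < DEMO_MAX_SLOTS; i++) {
		count++;	/* slot i goes into the command, dirty or not */
		if (next == i) {
			next = demo_next_bit(dirty, i + 1);
			if (next >= DEMO_MAX_SLOTS)
				break;
		}
	}
	/* One command covering slots 3..9: */
	printf("first_slot=%u count=%u\n", first, count);
	return 0;
}
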
746/**
 747 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 748 *
 749 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader type for which to emit the bindings.
750 */
751static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
752 int shader_slot)
753{
754 const struct vmw_ctx_bindinfo *loc =
755 &cbs->per_shader[shader_slot].shader_res[0].bi;
756 struct {
757 SVGA3dCmdHeader header;
758 SVGA3dCmdDXSetShaderResources body;
759 } *cmd;
760 size_t cmd_size, view_id_size;
761 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
762
763 vmw_collect_dirty_view_ids(cbs, loc,
764 cbs->per_shader[shader_slot].dirty_sr,
765 SVGA3D_DX_MAX_SRVIEWS);
766 if (cbs->bind_cmd_count == 0)
767 return 0;
768
769 view_id_size = cbs->bind_cmd_count*sizeof(uint32);
770 cmd_size = sizeof(*cmd) + view_id_size;
771 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
772 if (unlikely(cmd == NULL)) {
773 DRM_ERROR("Failed reserving FIFO space for DX shader"
774 " resource binding.\n");
775 return -ENOMEM;
776 }
777
778 cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
779 cmd->header.size = sizeof(cmd->body) + view_id_size;
780 cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
781 cmd->body.startView = cbs->bind_first_slot;
782
783 memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
784
785 vmw_fifo_commit(ctx->dev_priv, cmd_size);
786 bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
787 cbs->bind_first_slot, cbs->bind_cmd_count);
788
789 return 0;
790}
791
792/**
 793 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
794 *
795 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
796 */
797static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
798{
799 const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
800 struct {
801 SVGA3dCmdHeader header;
802 SVGA3dCmdDXSetRenderTargets body;
803 } *cmd;
804 size_t cmd_size, view_id_size;
805 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
806
807 vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
808 view_id_size = cbs->bind_cmd_count*sizeof(uint32);
809 cmd_size = sizeof(*cmd) + view_id_size;
810 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
811 if (unlikely(cmd == NULL)) {
812 DRM_ERROR("Failed reserving FIFO space for DX render-target"
813 " binding.\n");
814 return -ENOMEM;
815 }
816
817 cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
818 cmd->header.size = sizeof(cmd->body) + view_id_size;
819
820 if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
821 cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
822 else
823 cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
824
825 memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
826
827 vmw_fifo_commit(ctx->dev_priv, cmd_size);
828
 829 	return 0;
 831}
832
833/**
834 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
835 * without checking which bindings actually need to be emitted
836 *
837 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
838 * @bi: Pointer to where the binding info array is stored in @cbs
839 * @max_num: Maximum number of entries in the @bi array.
840 *
841 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
842 * Stops at the first non-existing binding in the @bi array.
843 * On output, @cbs->bind_cmd_count contains the number of bindings to be
844 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
845 * contains the command data.
846 */
847static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
848 const struct vmw_ctx_bindinfo *bi,
849 u32 max_num)
850{
851 const struct vmw_ctx_bindinfo_so *biso =
852 container_of(bi, struct vmw_ctx_bindinfo_so, bi);
853 unsigned long i;
854 SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
855
856 cbs->bind_cmd_count = 0;
857 cbs->bind_first_slot = 0;
858
859 for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
860 ++cbs->bind_cmd_count) {
861 if (!biso->bi.ctx)
862 break;
863
864 if (!biso->bi.scrubbed) {
865 so_buffer->sid = biso->bi.res->id;
866 so_buffer->offset = biso->offset;
867 so_buffer->sizeInBytes = biso->size;
868 } else {
869 so_buffer->sid = SVGA3D_INVALID_ID;
870 so_buffer->offset = 0;
871 so_buffer->sizeInBytes = 0;
872 }
873 }
874}
875
876/**
 877 * vmw_emit_set_so - Issue delayed streamout binding commands
878 *
879 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
880 */
881static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
882{
883 const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
884 struct {
885 SVGA3dCmdHeader header;
886 SVGA3dCmdDXSetSOTargets body;
887 } *cmd;
888 size_t cmd_size, so_target_size;
889 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
890
891 vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
892 if (cbs->bind_cmd_count == 0)
893 return 0;
894
895 so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
896 cmd_size = sizeof(*cmd) + so_target_size;
897 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
898 if (unlikely(cmd == NULL)) {
899 DRM_ERROR("Failed reserving FIFO space for DX SO target"
900 " binding.\n");
901 return -ENOMEM;
902 }
903
904 cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
905 cmd->header.size = sizeof(cmd->body) + so_target_size;
906 memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
907
908 vmw_fifo_commit(ctx->dev_priv, cmd_size);
909
 910 	return 0;
 912}
913
914/**
915 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
916 *
917 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
918 *
919 */
920static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
921{
922 struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
923 u32 i;
 924 	int ret = 0;
925
926 for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
927 if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
928 continue;
929
930 ret = vmw_emit_set_sr(cbs, i);
931 if (ret)
932 break;
933
934 __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
935 }
936
 937 	return ret;
938}
939
940/**
941 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
942 * SVGA3dCmdDXSetVertexBuffers command
943 *
944 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
945 * @bi: Pointer to where the binding info array is stored in @cbs
946 * @dirty: Bitmap indicating which bindings need to be emitted.
947 * @max_num: Maximum number of entries in the @bi array.
948 *
949 * Scans the @bi array for bindings that need to be emitted and
950 * builds a buffer of SVGA3dVertexBuffer data.
951 * On output, @cbs->bind_cmd_count contains the number of bindings to be
952 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
953 * binding, and @cbs->bind_cmd_buffer contains the command data.
954 */
955static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
956 const struct vmw_ctx_bindinfo *bi,
957 unsigned long *dirty,
958 u32 max_num)
959{
960 const struct vmw_ctx_bindinfo_vb *biv =
961 container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
962 unsigned long i, next_bit;
 963 	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) cbs->bind_cmd_buffer;
964
965 cbs->bind_cmd_count = 0;
966 i = find_first_bit(dirty, max_num);
967 next_bit = i;
968 cbs->bind_first_slot = i;
969
970 biv += i;
971 for (; i < max_num; ++i, ++biv, ++vbs) {
972 if (!biv->bi.ctx || biv->bi.scrubbed) {
973 vbs->sid = SVGA3D_INVALID_ID;
974 vbs->stride = 0;
975 vbs->offset = 0;
976 } else {
977 vbs->sid = biv->bi.res->id;
978 vbs->stride = biv->stride;
979 vbs->offset = biv->offset;
980 }
981 cbs->bind_cmd_count++;
982 if (next_bit == i) {
983 next_bit = find_next_bit(dirty, max_num, i + 1);
984 if (next_bit >= max_num)
985 break;
986 }
987 }
988}
989
990/**
 991 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
992 *
993 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
994 *
995 */
996static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
997{
998 const struct vmw_ctx_bindinfo *loc =
999 &cbs->vertex_buffers[0].bi;
1000 struct {
1001 SVGA3dCmdHeader header;
1002 SVGA3dCmdDXSetVertexBuffers body;
1003 } *cmd;
1004 size_t cmd_size, set_vb_size;
1005 const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1006
1007 vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
1008 SVGA3D_DX_MAX_VERTEXBUFFERS);
1009 if (cbs->bind_cmd_count == 0)
1010 return 0;
1011
1012 set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
1013 cmd_size = sizeof(*cmd) + set_vb_size;
1014 cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
1015 if (unlikely(cmd == NULL)) {
1016 DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
1017 " binding.\n");
1018 return -ENOMEM;
1019 }
1020
1021 cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
1022 cmd->header.size = sizeof(cmd->body) + set_vb_size;
1023 cmd->body.startBuffer = cbs->bind_first_slot;
1024
1025 memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
1026
1027 vmw_fifo_commit(ctx->dev_priv, cmd_size);
1028 bitmap_clear(cbs->dirty_vb,
1029 cbs->bind_first_slot, cbs->bind_cmd_count);
1030
1031 return 0;
1032}
1033
1034/**
1035 * vmw_binding_emit_dirty - Issue delayed binding commands
1036 *
1037 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1038 *
1039 * This function issues the delayed binding commands that arise from
1040 * previous scrub / unscrub calls. These binding commands are typically
1041 * commands that batch a number of bindings and therefore it makes sense
1042 * to delay them.
1043 */
1044static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
1045{
1046 int ret = 0;
1047 unsigned long hit = 0;
1048
1049 while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
1050 < VMW_BINDING_NUM_BITS) {
1051
1052 switch (hit) {
1053 case VMW_BINDING_RT_BIT:
1054 ret = vmw_emit_set_rt(cbs);
1055 break;
1056 case VMW_BINDING_PS_BIT:
1057 ret = vmw_binding_emit_dirty_ps(cbs);
1058 break;
1059 case VMW_BINDING_SO_BIT:
1060 ret = vmw_emit_set_so(cbs);
1061 break;
1062 case VMW_BINDING_VB_BIT:
1063 ret = vmw_emit_set_vb(cbs);
1064 break;
1065 default:
1066 BUG();
1067 }
1068 if (ret)
1069 return ret;
1070
1071 __clear_bit(hit, &cbs->dirty);
1072 hit++;
1073 }
1074
1075 return 0;
1076}
1077
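/*
 * Editor's model (not part of this patch) of the dispatch loop above:
 * walk the set bits of a small dirty mask, run the per-bit emit function,
 * and clear a bit only after its emit succeeds so a failed emit is
 * retried on the next call. Invented bit names and emit stub:
 */
#include <stdio.h>

enum { DEMO_RT_BIT, DEMO_PS_BIT, DEMO_SO_BIT, DEMO_VB_BIT, DEMO_NUM_BITS };

static int demo_emit(unsigned int bit)
{
	printf("emit for bit %u\n", bit);
	return 0;	/* 0 on success, as with the emit helpers above */
}

int main(void)
{
	unsigned long dirty = (1UL << DEMO_RT_BIT) | (1UL << DEMO_VB_BIT);
	unsigned int bit;

	for (bit = 0; bit < DEMO_NUM_BITS; bit++) {
		if (!(dirty & (1UL << bit)))
			continue;
		if (demo_emit(bit))
			return 1;	/* bit stays set; retried later */
		dirty &= ~(1UL << bit);
	}
	return 0;
}
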
1078/**
1079 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
1080 * scrub from a context
1081 *
1082 * @bi: single binding information.
1083 * @rebind: Whether to issue a bind instead of scrub command.
1084 */
1085static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
1086{
1087 struct vmw_ctx_bindinfo_view *biv =
1088 container_of(bi, struct vmw_ctx_bindinfo_view, bi);
1089 struct vmw_ctx_binding_state *cbs =
1090 vmw_context_binding_state(bi->ctx);
1091
1092 __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
1093 __set_bit(VMW_BINDING_PS_SR_BIT,
1094 &cbs->per_shader[biv->shader_slot].dirty);
1095 __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
1096
1097 return 0;
1098}
1099
1100/**
1101 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
1102 * scrub from a context
1103 *
1104 * @bi: single binding information.
1105 * @rebind: Whether to issue a bind instead of scrub command.
1106 */
1107static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
1108{
1109 struct vmw_ctx_binding_state *cbs =
1110 vmw_context_binding_state(bi->ctx);
1111
1112 __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
1113
1114 return 0;
1115}
1116
1117/**
1118 * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
1119 * scrub from a context
1120 *
1121 * @bi: single binding information.
1122 * @rebind: Whether to issue a bind instead of scrub command.
1123 */
1124static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
1125{
1126 struct vmw_ctx_binding_state *cbs =
1127 vmw_context_binding_state(bi->ctx);
1128
1129 __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
1130
1131 return 0;
1132}
1133
1134/**
1135 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
1136 * scrub from a context
1137 *
1138 * @bi: single binding information.
1139 * @rebind: Whether to issue a bind instead of scrub command.
1140 */
1141static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
1142{
1143 struct vmw_ctx_bindinfo_vb *bivb =
1144 container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1145 struct vmw_ctx_binding_state *cbs =
1146 vmw_context_binding_state(bi->ctx);
1147
1148 __set_bit(bivb->slot, cbs->dirty_vb);
1149 __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
1150
1151 return 0;
1152}
1153
1154/**
1155 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
1156 *
1157 * @bi: single binding information.
1158 * @rebind: Whether to issue a bind instead of scrub command.
1159 */
1160static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
1161{
1162 struct vmw_ctx_bindinfo_ib *binding =
1163 container_of(bi, typeof(*binding), bi);
1164 struct vmw_private *dev_priv = bi->ctx->dev_priv;
1165 struct {
1166 SVGA3dCmdHeader header;
1167 SVGA3dCmdDXSetIndexBuffer body;
1168 } *cmd;
1169
1170 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
1171 if (unlikely(cmd == NULL)) {
1172 DRM_ERROR("Failed reserving FIFO space for DX index buffer "
1173 "binding.\n");
1174 return -ENOMEM;
1175 }
1176 cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
1177 cmd->header.size = sizeof(cmd->body);
1178 if (rebind) {
1179 cmd->body.sid = bi->res->id;
1180 cmd->body.format = binding->format;
1181 cmd->body.offset = binding->offset;
1182 } else {
1183 cmd->body.sid = SVGA3D_INVALID_ID;
1184 cmd->body.format = 0;
1185 cmd->body.offset = 0;
1186 }
1187
1188 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1189
1190 return 0;
1191}
1192
1193/**
1194 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
1195 * memory accounting.
1196 *
1197 * @dev_priv: Pointer to a device private structure.
1198 *
1199 * Returns a pointer to a newly allocated struct or an error pointer on error.
1200 */
1201struct vmw_ctx_binding_state *
1202vmw_binding_state_alloc(struct vmw_private *dev_priv)
1203{
1204 struct vmw_ctx_binding_state *cbs;
1205 int ret;
1206
1207 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
1208 false, false);
1209 if (ret)
1210 return ERR_PTR(ret);
1211
1212 cbs = vzalloc(sizeof(*cbs));
1213 if (!cbs) {
1214 ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1215 return ERR_PTR(-ENOMEM);
1216 }
1217
1218 cbs->dev_priv = dev_priv;
1219 INIT_LIST_HEAD(&cbs->list);
1220
1221 return cbs;
1222}
1223
1224/**
1225 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
1226 * memory accounting info.
1227 *
1228 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
1229 */
1230void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
1231{
1232 struct vmw_private *dev_priv = cbs->dev_priv;
1233
1234 vfree(cbs);
1235 ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1236}
1237
1238/**
1239 * vmw_binding_state_list - Get the binding list of a
1240 * struct vmw_ctx_binding_state
1241 *
1242 * @cbs: Pointer to the struct vmw_ctx_binding_state
1243 *
1244 * Returns the binding list which can be used to traverse through the bindings
1245 * and access the resource information of all bindings.
1246 */
1247struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
1248{
1249 return &cbs->list;
1250}
1251
1252/**
1253 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
1254 *
1255 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
1256 *
1257 * Drops all bindings registered in @cbs. No device binding actions are
1258 * performed.
1259 */
1260void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
1261{
1262 struct vmw_ctx_bindinfo *entry, *next;
1263
1264 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
1265 vmw_binding_drop(entry);
1266}
1267
1268/*
1269 * This function is unused at run-time, and only used to hold various build
1270 * asserts important for code optimization assumptions.
1271 */
1272static void vmw_binding_build_asserts(void)
1273{
1274 BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
1275 BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
1276 BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
1277
1278 /*
1279 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
1280 * view id arrays.
1281 */
1282 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
1283 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
1284 BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
1285
1286 /*
1287 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
1288 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
1289 */
1290 BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
1291 VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1292 BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
1293 VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1294}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#ifndef _VMWGFX_BINDING_H_
28#define _VMWGFX_BINDING_H_
29
30#include "device_include/svga3d_reg.h"
31#include <linux/list.h>
32
33#define VMW_MAX_VIEW_BINDINGS 128
34
35struct vmw_private;
36struct vmw_ctx_binding_state;
37
38/*
39 * enum vmw_ctx_binding_type - abstract resource to context binding types
40 */
41enum vmw_ctx_binding_type {
42 vmw_ctx_binding_shader,
43 vmw_ctx_binding_rt,
44 vmw_ctx_binding_tex,
45 vmw_ctx_binding_cb,
46 vmw_ctx_binding_dx_shader,
47 vmw_ctx_binding_dx_rt,
48 vmw_ctx_binding_sr,
49 vmw_ctx_binding_ds,
50 vmw_ctx_binding_so,
51 vmw_ctx_binding_vb,
52 vmw_ctx_binding_ib,
53 vmw_ctx_binding_max
54};
55
56/**
57 * struct vmw_ctx_bindinfo - single binding metadata
58 *
59 * @ctx_list: List head for the context's list of bindings.
60 * @res_list: List head for a resource's list of bindings.
61 * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
62 * indicates no binding present.
63 * @res: Non-refcounted pointer to the resource the binding points to. This
64 * is typically a surface or a view.
65 * @bt: Binding type.
66 * @scrubbed: Whether the binding has been scrubbed from the context.
67 */
68struct vmw_ctx_bindinfo {
69 struct list_head ctx_list;
70 struct list_head res_list;
71 struct vmw_resource *ctx;
72 struct vmw_resource *res;
73 enum vmw_ctx_binding_type bt;
74 bool scrubbed;
75};
76
77/**
78 * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
79 *
80 * @bi: struct vmw_ctx_bindinfo we derive from.
81 * @texture_stage: Device data used to reconstruct binding command.
82 */
83struct vmw_ctx_bindinfo_tex {
84 struct vmw_ctx_bindinfo bi;
85 uint32 texture_stage;
86};
87
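/*
 * Editor's illustration (not part of this patch): every derived bindinfo
 * struct embeds struct vmw_ctx_bindinfo as its first member, so the .c
 * file can recover the derived type from a list-stored base pointer with
 * container_of(). Freestanding equivalent with invented types:
 */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_base { int bt; };
struct demo_tex { struct demo_base bi; unsigned int texture_stage; };

int main(void)
{
	struct demo_tex t = { { 2 }, 5 };
	struct demo_base *bp = &t.bi;	/* what the binding lists store */
	struct demo_tex *tp = demo_container_of(bp, struct demo_tex, bi);

	printf("texture_stage=%u\n", tp->texture_stage);	/* prints 5 */
	return 0;
}
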
88/**
89 * struct vmw_ctx_bindinfo_shader - Shader binding metadata
90 *
91 * @bi: struct vmw_ctx_bindinfo we derive from.
92 * @shader_slot: Device data used to reconstruct binding command.
93 */
94struct vmw_ctx_bindinfo_shader {
95 struct vmw_ctx_bindinfo bi;
96 SVGA3dShaderType shader_slot;
97};
98
99/**
100 * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
101 *
102 * @bi: struct vmw_ctx_bindinfo we derive from.
103 * @shader_slot: Device data used to reconstruct binding command.
104 * @offset: Device data used to reconstruct binding command.
105 * @size: Device data used to reconstruct binding command.
106 * @slot: Device data used to reconstruct binding command.
107 */
108struct vmw_ctx_bindinfo_cb {
109 struct vmw_ctx_bindinfo bi;
110 SVGA3dShaderType shader_slot;
111 uint32 offset;
112 uint32 size;
113 uint32 slot;
114};
115
116/**
117 * struct vmw_ctx_bindinfo_view - View binding metadata
118 *
119 * @bi: struct vmw_ctx_bindinfo we derive from.
120 * @shader_slot: Device data used to reconstruct binding command.
121 * @slot: Device data used to reconstruct binding command.
122 */
123struct vmw_ctx_bindinfo_view {
124 struct vmw_ctx_bindinfo bi;
125 SVGA3dShaderType shader_slot;
126 uint32 slot;
127};
128
129/**
130 * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
131 *
132 * @bi: struct vmw_ctx_bindinfo we derive from.
133 * @offset: Device data used to reconstruct binding command.
134 * @size: Device data used to reconstruct binding command.
135 * @slot: Device data used to reconstruct binding command.
136 */
137struct vmw_ctx_bindinfo_so {
138 struct vmw_ctx_bindinfo bi;
139 uint32 offset;
140 uint32 size;
141 uint32 slot;
142};
143
144/**
145 * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
146 *
147 * @bi: struct vmw_ctx_bindinfo we derive from.
148 * @offset: Device data used to reconstruct binding command.
149 * @stride: Device data used to reconstruct binding command.
150 * @slot: Device data used to reconstruct binding command.
151 */
152struct vmw_ctx_bindinfo_vb {
153 struct vmw_ctx_bindinfo bi;
154 uint32 offset;
155 uint32 stride;
156 uint32 slot;
157};
158
159/**
 160 * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
161 *
162 * @bi: struct vmw_ctx_bindinfo we derive from.
163 * @offset: Device data used to reconstruct binding command.
164 * @format: Device data used to reconstruct binding command.
165 */
166struct vmw_ctx_bindinfo_ib {
167 struct vmw_ctx_bindinfo bi;
168 uint32 offset;
169 uint32 format;
170};
171
172/**
173 * struct vmw_dx_shader_bindings - per shader type context binding state
174 *
175 * @shader: The shader binding for this shader type
 176 * @const_buffers: Const buffer bindings for this shader type.
177 * @shader_res: Shader resource view bindings for this shader type.
178 * @dirty_sr: Bitmap tracking individual shader resource bindings changes
179 * that have not yet been emitted to the device.
180 * @dirty: Bitmap tracking per-binding type binding changes that have not
181 * yet been emitted to the device.
182 */
183struct vmw_dx_shader_bindings {
184 struct vmw_ctx_bindinfo_shader shader;
185 struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
186 struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
187 DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
188 unsigned long dirty;
189};
190
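/*
 * Editor's illustration (not part of this patch): DECLARE_BITMAP(name, n)
 * is an unsigned long array wide enough for n bits; @dirty_sr above holds
 * one bit per shader-resource slot. Minimal userspace equivalent:
 */
#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))
#define DEMO_BITS_TO_LONGS(n) \
	(((n) + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)
#define DEMO_DECLARE_BITMAP(name, bits) \
	unsigned long name[DEMO_BITS_TO_LONGS(bits)]

struct demo_bindings {
	DEMO_DECLARE_BITMAP(dirty_sr, 64);	/* stand-in slot count */
};

int main(void)
{
	struct demo_bindings b = { { 0 } };
	unsigned int slot = 5;

	b.dirty_sr[slot / DEMO_BITS_PER_LONG] |=
		1UL << (slot % DEMO_BITS_PER_LONG);
	printf("words=%zu slot5=%d\n",
	       sizeof(b.dirty_sr) / sizeof(unsigned long),
	       !!(b.dirty_sr[0] & (1UL << slot)));
	return 0;
}
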
191extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
192 const struct vmw_ctx_bindinfo *ci,
193 u32 shader_slot, u32 slot);
194extern void
195vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
196 struct vmw_ctx_binding_state *from);
197extern void vmw_binding_res_list_kill(struct list_head *head);
198extern void vmw_binding_res_list_scrub(struct list_head *head);
199extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
200extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
201extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
202extern struct vmw_ctx_binding_state *
203vmw_binding_state_alloc(struct vmw_private *dev_priv);
204extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
205extern struct list_head *
206vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
207extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
208
209#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9db9d2..3329f623c8bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
73}; 73};
74 74
75static struct ttm_place mob_ne_placement_flags = {
76 .fpfn = 0,
77 .lpfn = 0,
78 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
79};
80
75struct ttm_placement vmw_vram_placement = { 81struct ttm_placement vmw_vram_placement = {
76 .num_placement = 1, 82 .num_placement = 1,
77 .placement = &vram_placement_flags, 83 .placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
200 .busy_placement = &mob_placement_flags 206 .busy_placement = &mob_placement_flags
201}; 207};
202 208
209struct ttm_placement vmw_mob_ne_placement = {
210 .num_placement = 1,
211 .num_busy_placement = 1,
212 .placement = &mob_ne_placement_flags,
213 .busy_placement = &mob_ne_placement_flags
214};
215
203struct vmw_ttm_tt { 216struct vmw_ttm_tt {
204 struct ttm_dma_tt dma_ttm; 217 struct ttm_dma_tt dma_ttm;
205 struct vmw_private *dev_priv; 218 struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
804/** 817/**
805 * vmw_move_notify - TTM move_notify_callback 818 * vmw_move_notify - TTM move_notify_callback
806 * 819 *
807 * @bo: The TTM buffer object about to move. 820 * @bo: The TTM buffer object about to move.
808 * @mem: The truct ttm_mem_reg indicating to what memory 821 * @mem: The struct ttm_mem_reg indicating to what memory
809 * region the move is taking place. 822 * region the move is taking place.
810 * 823 *
811 * Calls move_notify for all subsystems needing it. 824 * Calls move_notify for all subsystems needing it.
812 * (currently only resources). 825 * (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
815 struct ttm_mem_reg *mem) 828 struct ttm_mem_reg *mem)
816{ 829{
817 vmw_resource_move_notify(bo, mem); 830 vmw_resource_move_notify(bo, mem);
831 vmw_query_move_notify(bo, mem);
818} 832}
819 833
820 834
821/** 835/**
822 * vmw_swap_notify - TTM move_notify_callback 836 * vmw_swap_notify - TTM move_notify_callback
823 * 837 *
824 * @bo: The TTM buffer object about to be swapped out. 838 * @bo: The TTM buffer object about to be swapped out.
825 */ 839 */
826static void vmw_swap_notify(struct ttm_buffer_object *bo) 840static void vmw_swap_notify(struct ttm_buffer_object *bo)
827{ 841{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
1/**************************************************************************
2 *
3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_api.h"
30
31/*
32 * Size of inline command buffers. Try to make sure that a page size is a
33 * multiple of the DMA pool allocation size.
34 */
35#define VMW_CMDBUF_INLINE_ALIGN 64
36#define VMW_CMDBUF_INLINE_SIZE \
37 (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
38
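/*
 * Editor's arithmetic check (not part of this patch): assuming the
 * SVGACBHeader rounds up to one 64-byte alignment unit, each inline
 * header+payload allocation is exactly 1024 bytes, so four of them tile a
 * 4096-byte page with no slack:
 */
#include <stdio.h>

#define DEMO_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int hdr = DEMO_ALIGN(40, 64);	/* assumed header size <= 64 */
	unsigned int inline_size = 1024 - hdr;	/* VMW_CMDBUF_INLINE_SIZE */

	printf("inline=%u alloc=%u per-4K-page=%u\n",
	       inline_size, hdr + inline_size, 4096 / (hdr + inline_size));
	return 0;
}
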
39/**
40 * struct vmw_cmdbuf_context - Command buffer context queues
41 *
42 * @submitted: List of command buffers that have been submitted to the
43 * manager but not yet submitted to hardware.
44 * @hw_submitted: List of command buffers submitted to hardware.
45 * @preempted: List of preempted command buffers.
46 * @num_hw_submitted: Number of buffers currently being processed by hardware
47 */
48struct vmw_cmdbuf_context {
49 struct list_head submitted;
50 struct list_head hw_submitted;
51 struct list_head preempted;
52 unsigned num_hw_submitted;
53};
54
55/**
56 * struct vmw_cmdbuf_man: - Command buffer manager
57 *
58 * @cur_mutex: Mutex protecting the command buffer used for incremental small
59 * kernel command submissions, @cur.
60 * @space_mutex: Mutex to protect against starvation when we allocate
61 * main pool buffer space.
 62 * @work: A struct work_struct implementing command buffer error handling.
63 * Immutable.
64 * @dev_priv: Pointer to the device private struct. Immutable.
65 * @ctx: Array of command buffer context queues. The queues and the context
66 * data is protected by @lock.
67 * @error: List of command buffers that have caused device errors.
68 * Protected by @lock.
69 * @mm: Range manager for the command buffer space. Manager allocations and
70 * frees are protected by @lock.
71 * @cmd_space: Buffer object for the command buffer space, unless we were
 72 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
73 * @map_obj: Mapping state for @cmd_space. Immutable.
74 * @map: Pointer to command buffer space. May be a mapped buffer object or
 75 * a contiguous coherent DMA memory allocation. Immutable.
76 * @cur: Command buffer for small kernel command submissions. Protected by
77 * the @cur_mutex.
78 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
79 * @default_size: Default size for the @cur command buffer. Immutable.
80 * @max_hw_submitted: Max number of in-flight command buffers the device can
81 * handle. Immutable.
82 * @lock: Spinlock protecting command submission queues.
83 * @header: Pool of DMA memory for device command buffer headers.
84 * Internal protection.
85 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
86 * space for inline data. Internal protection.
87 * @tasklet: Tasklet struct for irq processing. Immutable.
88 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
89 * space.
90 * @idle_queue: Wait queue for processes waiting for command buffer idle.
91 * @irq_on: Whether the process function has requested irq to be turned on.
92 * Protected by @lock.
 93 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
94 * allocation. Immutable.
95 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
96 * Typically this is false only during bootstrap.
97 * @handle: DMA address handle for the command buffer space if @using_mob is
98 * false. Immutable.
99 * @size: The size of the command buffer space. Immutable.
100 */
101struct vmw_cmdbuf_man {
102 struct mutex cur_mutex;
103 struct mutex space_mutex;
104 struct work_struct work;
105 struct vmw_private *dev_priv;
106 struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
107 struct list_head error;
108 struct drm_mm mm;
109 struct ttm_buffer_object *cmd_space;
110 struct ttm_bo_kmap_obj map_obj;
111 u8 *map;
112 struct vmw_cmdbuf_header *cur;
113 size_t cur_pos;
114 size_t default_size;
115 unsigned max_hw_submitted;
116 spinlock_t lock;
117 struct dma_pool *headers;
118 struct dma_pool *dheaders;
119 struct tasklet_struct tasklet;
120 wait_queue_head_t alloc_queue;
121 wait_queue_head_t idle_queue;
122 bool irq_on;
123 bool using_mob;
124 bool has_pool;
125 dma_addr_t handle;
126 size_t size;
127};
128
129/**
130 * struct vmw_cmdbuf_header - Command buffer metadata
131 *
132 * @man: The command buffer manager.
133 * @cb_header: Device command buffer header, allocated from a DMA pool.
134 * @cb_context: The device command buffer context.
135 * @list: List head for attaching to the manager lists.
136 * @node: The range manager node.
 137 * @handle: The DMA address of @cb_header. Handed to the device on command
138 * buffer submission.
139 * @cmd: Pointer to the command buffer space of this buffer.
140 * @size: Size of the command buffer space of this buffer.
141 * @reserved: Reserved space of this buffer.
142 * @inline_space: Whether inline command buffer space is used.
143 */
144struct vmw_cmdbuf_header {
145 struct vmw_cmdbuf_man *man;
146 SVGACBHeader *cb_header;
147 SVGACBContext cb_context;
148 struct list_head list;
149 struct drm_mm_node node;
150 dma_addr_t handle;
151 u8 *cmd;
152 size_t size;
153 size_t reserved;
154 bool inline_space;
155};
156
157/**
158 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
159 * command buffer space.
160 *
161 * @cb_header: Device command buffer header.
162 * @cmd: Inline command buffer space.
163 */
164struct vmw_cmdbuf_dheader {
165 SVGACBHeader cb_header;
166 u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
167};
168
169/**
170 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
171 *
172 * @page_size: Size of requested command buffer space in pages.
173 * @node: Pointer to the range manager node.
174 * @done: True if this allocation has succeeded.
175 */
176struct vmw_cmdbuf_alloc_info {
177 size_t page_size;
178 struct drm_mm_node *node;
179 bool done;
180};
181
182/* Loop over each context in the command buffer manager. */
183#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
184 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
185 ++(_i), ++(_ctx))
186
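/*
 * Editor's note (not part of this patch): for_each_cmdbuf_ctx is the
 * standard two-cursor iterator macro; the index and the element pointer
 * advance together. Freestanding analogue with invented types:
 */
#include <stdio.h>

struct demo_ctx { int id; };

#define DEMO_N_CTX 2
#define for_each_demo_ctx(arr, i, c) \
	for ((i) = 0, (c) = &(arr)[0]; (i) < DEMO_N_CTX; ++(i), ++(c))

int main(void)
{
	struct demo_ctx ctxs[DEMO_N_CTX] = { { 0 }, { 1 } };
	struct demo_ctx *c;
	int i;

	for_each_demo_ctx(ctxs, i, c)
		printf("context %d\n", c->id);
	return 0;
}
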
187static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
188
189
190/**
191 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
192 *
 193 * @man: The command buffer manager.
194 * @interruptible: Whether to wait interruptible when locking.
195 */
196static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
197{
198 if (interruptible) {
199 if (mutex_lock_interruptible(&man->cur_mutex))
200 return -ERESTARTSYS;
201 } else {
202 mutex_lock(&man->cur_mutex);
203 }
204
205 return 0;
206}
207
208/**
209 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
210 *
 211 * @man: The command buffer manager.
212 */
213static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
214{
215 mutex_unlock(&man->cur_mutex);
216}
217
218/**
219 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
220 * been used for the device context with inline command buffers.
221 * Need not be called locked.
222 *
223 * @header: Pointer to the header to free.
224 */
225static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
226{
227 struct vmw_cmdbuf_dheader *dheader;
228
229 if (WARN_ON_ONCE(!header->inline_space))
230 return;
231
232 dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
233 cb_header);
234 dma_pool_free(header->man->dheaders, dheader, header->handle);
235 kfree(header);
236}
237
238/**
239 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
240 * associated structures.
241 *
 242 * @header: Pointer to the header to free.
243 *
244 * For internal use. Must be called with man::lock held.
245 */
246static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
247{
248 struct vmw_cmdbuf_man *man = header->man;
249
250 BUG_ON(!spin_is_locked(&man->lock));
251
252 if (header->inline_space) {
253 vmw_cmdbuf_header_inline_free(header);
254 return;
255 }
256
257 drm_mm_remove_node(&header->node);
258 wake_up_all(&man->alloc_queue);
259 if (header->cb_header)
260 dma_pool_free(man->headers, header->cb_header,
261 header->handle);
262 kfree(header);
263}
264
265/**
266 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
267 * associated structures.
268 *
269 * @header: Pointer to the header to free.
270 */
271void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
272{
273 struct vmw_cmdbuf_man *man = header->man;
274
275 /* Avoid locking if inline_space */
276 if (header->inline_space) {
277 vmw_cmdbuf_header_inline_free(header);
278 return;
279 }
280 spin_lock_bh(&man->lock);
281 __vmw_cmdbuf_header_free(header);
282 spin_unlock_bh(&man->lock);
283}
284
285
286/**
 287 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
288 *
289 * @header: The header of the buffer to submit.
290 */
291static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
292{
293 struct vmw_cmdbuf_man *man = header->man;
294 u32 val;
295
296 if (sizeof(header->handle) > 4)
297 val = (header->handle >> 32);
298 else
299 val = 0;
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
301
302 val = (header->handle & 0xFFFFFFFFULL);
303 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
305
306 return header->cb_header->status;
307}
308
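/*
 * Editor's model of the register handoff above (not part of this patch):
 * the 64-bit header address is split across two 32-bit register writes,
 * and the context id is OR'ed into the low word; the header allocation is
 * assumed aligned so the masked low bits are free. Invented mask value.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CTX_MASK 0x3Fu	/* stand-in for SVGA_CB_CONTEXT_MASK */

int main(void)
{
	uint64_t handle = 0x123456789000ULL;	/* 64-byte aligned DMA address */
	uint32_t ctx = 1;
	uint32_t high = (uint32_t)(handle >> 32);
	uint32_t low = (uint32_t)(handle & 0xFFFFFFFFULL);

	low |= ctx & DEMO_CTX_MASK;	/* low bits are zero by alignment */
	printf("HIGH=0x%08x LOW=0x%08x\n", high, low);
	return 0;
}
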
309/**
310 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
311 *
312 * @ctx: The command buffer context to initialize
313 */
314static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
315{
316 INIT_LIST_HEAD(&ctx->hw_submitted);
317 INIT_LIST_HEAD(&ctx->submitted);
318 INIT_LIST_HEAD(&ctx->preempted);
319 ctx->num_hw_submitted = 0;
320}
321
322/**
323 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
324 * context.
325 *
326 * @man: The command buffer manager.
327 * @ctx: The command buffer context.
328 *
329 * Submits command buffers to hardware until there are no more command
330 * buffers to submit or the hardware can't handle more command buffers.
331 */
332static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
333 struct vmw_cmdbuf_context *ctx)
334{
335 while (ctx->num_hw_submitted < man->max_hw_submitted &&
336 !list_empty(&ctx->submitted)) {
337 struct vmw_cmdbuf_header *entry;
338 SVGACBStatus status;
339
340 entry = list_first_entry(&ctx->submitted,
341 struct vmw_cmdbuf_header,
342 list);
343
344 status = vmw_cmdbuf_header_submit(entry);
345
346 /* This should never happen */
347 if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
348 entry->cb_header->status = SVGA_CB_STATUS_NONE;
349 break;
350 }
351
352 list_del(&entry->list);
353 list_add_tail(&entry->list, &ctx->hw_submitted);
354 ctx->num_hw_submitted++;
355 }
 357}
358
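/*
 * Editor's model (not part of this patch) of the loop above: buffers move
 * from the submitted queue to hardware until the queue empties or the
 * device's in-flight limit is hit; the remainder stays queued for the next
 * processing pass. Counters stand in for the lists:
 */
#include <stdio.h>

int main(void)
{
	unsigned int queued = 7, hw_inflight = 1;
	const unsigned int max_hw = 4;	/* stand-in for man->max_hw_submitted */

	while (hw_inflight < max_hw && queued > 0) {
		queued--;	/* list_del from ctx->submitted */
		hw_inflight++;	/* list_add_tail to ctx->hw_submitted */
	}
	printf("still queued=%u in-flight=%u\n", queued, hw_inflight);
	return 0;
}
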
359/**
 360 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 361 *
 362 * @man: The command buffer manager.
 363 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued.
 364 *
 365 * Submits command buffers to hardware if possible, and processes finished
 366 * buffers, typically freeing them, but taking appropriate action on
 367 * preemption or error. Wakes up waiters if appropriate.
368 */
369static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
370 struct vmw_cmdbuf_context *ctx,
371 int *notempty)
372{
373 struct vmw_cmdbuf_header *entry, *next;
374
375 vmw_cmdbuf_ctx_submit(man, ctx);
376
377 list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
378 SVGACBStatus status = entry->cb_header->status;
379
380 if (status == SVGA_CB_STATUS_NONE)
381 break;
382
383 list_del(&entry->list);
384 wake_up_all(&man->idle_queue);
385 ctx->num_hw_submitted--;
386 switch (status) {
387 case SVGA_CB_STATUS_COMPLETED:
388 __vmw_cmdbuf_header_free(entry);
389 break;
390 case SVGA_CB_STATUS_COMMAND_ERROR:
391 case SVGA_CB_STATUS_CB_HEADER_ERROR:
392 list_add_tail(&entry->list, &man->error);
393 schedule_work(&man->work);
394 break;
395 case SVGA_CB_STATUS_PREEMPTED:
396 list_add(&entry->list, &ctx->preempted);
397 break;
398 default:
399 WARN_ONCE(true, "Undefined command buffer status.\n");
400 __vmw_cmdbuf_header_free(entry);
401 break;
402 }
403 }
404
405 vmw_cmdbuf_ctx_submit(man, ctx);
406 if (!list_empty(&ctx->submitted))
407 (*notempty)++;
408}
409
410/**
411 * vmw_cmdbuf_man_process - Process all command buffer contexts and
412 * switch on and off irqs as appropriate.
413 *
414 * @man: The command buffer manager.
415 *
416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 417 * command buffers left that are not submitted to hardware, make sure
418 * IRQ handling is turned on. Otherwise, make sure it's turned off. This
419 * function may return -EAGAIN to indicate it should be rerun due to
 420 * possibly missed IRQs if IRQs have just been turned on.
421 */
422static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
423{
424 int notempty = 0;
425 struct vmw_cmdbuf_context *ctx;
426 int i;
427
428 for_each_cmdbuf_ctx(man, i, ctx)
429 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
430
431 if (man->irq_on && !notempty) {
432 vmw_generic_waiter_remove(man->dev_priv,
433 SVGA_IRQFLAG_COMMAND_BUFFER,
434 &man->dev_priv->cmdbuf_waiters);
435 man->irq_on = false;
436 } else if (!man->irq_on && notempty) {
437 vmw_generic_waiter_add(man->dev_priv,
438 SVGA_IRQFLAG_COMMAND_BUFFER,
439 &man->dev_priv->cmdbuf_waiters);
440 man->irq_on = true;
441
442 /* Rerun in case we just missed an irq. */
443 return -EAGAIN;
444 }
445
446 return 0;
447}
448
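/*
 * Editor's sketch of the -EAGAIN contract (not part of this patch):
 * turning the irq on can race with an interrupt that already fired, so
 * callers rerun the processing step once when told to. Compare
 * vmw_cmdbuf_ctx_add() and the tasklet below.
 */
#include <stdio.h>

#define DEMO_EAGAIN 11

static int calls;

static int demo_process(void)
{
	/* First call enables the irq and asks for one rerun. */
	return ++calls == 1 ? -DEMO_EAGAIN : 0;
}

int main(void)
{
	if (demo_process() == -DEMO_EAGAIN)
		(void)demo_process();	/* rerun in case an irq was missed */
	printf("process ran %d times\n", calls);
	return 0;
}
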
449/**
450 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
451 * command buffer context
452 *
453 * @man: The command buffer manager.
454 * @header: The header of the buffer to submit.
455 * @cb_context: The command buffer context to use.
456 *
457 * This function adds @header to the "submitted" queue of the command
458 * buffer context identified by @cb_context. It then calls the command buffer
459 * manager processing to potentially submit the buffer to hardware.
460 * @man->lock needs to be held when calling this function.
461 */
462static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
463 struct vmw_cmdbuf_header *header,
464 SVGACBContext cb_context)
465{
466 if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
467 header->cb_header->dxContext = 0;
468 header->cb_context = cb_context;
469 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
470
471 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
472 vmw_cmdbuf_man_process(man);
473}
474
475/**
476 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
477 * handler implemented as a tasklet.
478 *
479 * @data: Tasklet closure. A pointer to the command buffer manager cast to
480 * an unsigned long.
481 *
482 * The bottom half (tasklet) of the interrupt handler simply calls into the
483 * command buffer processor to free finished buffers and submit any
484 * queued buffers to hardware.
485 */
486static void vmw_cmdbuf_man_tasklet(unsigned long data)
487{
488 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
489
490 spin_lock(&man->lock);
491 if (vmw_cmdbuf_man_process(man) == -EAGAIN)
492 (void) vmw_cmdbuf_man_process(man);
493 spin_unlock(&man->lock);
494}
495
496/**
497 * vmw_cmdbuf_work_func - The deferred work function that handles
498 * command buffer errors.
499 *
500 * @work: The work func closure argument.
501 *
502 * Restarting the command buffer context after an error requires process
503 * context, so it is deferred to this work function.
504 */
505static void vmw_cmdbuf_work_func(struct work_struct *work)
506{
507 struct vmw_cmdbuf_man *man =
508 container_of(work, struct vmw_cmdbuf_man, work);
509 struct vmw_cmdbuf_header *entry, *next;
510 bool restart = false;
511
512 spin_lock_bh(&man->lock);
513 list_for_each_entry_safe(entry, next, &man->error, list) {
514 restart = true;
515 DRM_ERROR("Command buffer error.\n");
516
517 list_del(&entry->list);
518 __vmw_cmdbuf_header_free(entry);
519 wake_up_all(&man->idle_queue);
520 }
521 spin_unlock_bh(&man->lock);
522
523 if (restart && vmw_cmdbuf_startstop(man, true))
524 DRM_ERROR("Failed restarting command buffer context 0.\n");
525
526}
527
528/**
529 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
530 *
531 * @man: The command buffer manager.
532 * @check_preempted: Also check the preempted queue for pending command buffers.
533 *
534 */
535static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
536 bool check_preempted)
537{
538 struct vmw_cmdbuf_context *ctx;
539 bool idle = false;
540 int i;
541
542 spin_lock_bh(&man->lock);
543 vmw_cmdbuf_man_process(man);
544 for_each_cmdbuf_ctx(man, i, ctx) {
545 if (!list_empty(&ctx->submitted) ||
546 !list_empty(&ctx->hw_submitted) ||
547 (check_preempted && !list_empty(&ctx->preempted)))
548 goto out_unlock;
549 }
550
551 idle = list_empty(&man->error);
552
553out_unlock:
554 spin_unlock_bh(&man->lock);
555
556 return idle;
557}
558
559/**
560 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
561 * command submissions
562 *
563 * @man: The command buffer manager.
564 *
565 * Flushes the current command buffer without allocating a new one. A new one
566 * is automatically allocated when needed. Call with @man->cur_mutex held.
567 */
568static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
569{
570 struct vmw_cmdbuf_header *cur = man->cur;
571
572 WARN_ON(!mutex_is_locked(&man->cur_mutex));
573
574 if (!cur)
575 return;
576
577 spin_lock_bh(&man->lock);
578 if (man->cur_pos == 0) {
579 __vmw_cmdbuf_header_free(cur);
580 goto out_unlock;
581 }
582
583 man->cur->cb_header->length = man->cur_pos;
584 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
585out_unlock:
586 spin_unlock_bh(&man->lock);
587 man->cur = NULL;
588 man->cur_pos = 0;
589}
590
591/**
592 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
593 * command submissions
594 *
595 * @man: The command buffer manager.
596 * @interruptible: Whether to sleep interruptibly while waiting.
597 *
598 * Flushes the current command buffer without allocating a new one. A new one
599 * is automatically allocated when needed.
600 */
601int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
602 bool interruptible)
603{
604 int ret = vmw_cmdbuf_cur_lock(man, interruptible);
605
606 if (ret)
607 return ret;
608
609 __vmw_cmdbuf_cur_flush(man);
610 vmw_cmdbuf_cur_unlock(man);
611
612 return 0;
613}
614
615/**
616 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
617 *
618 * @man: The command buffer manager.
619 * @interruptible: Sleep interruptibly while waiting.
620 * @timeout: Time out after this many ticks.
621 *
622 * Wait until the command buffer manager has processed all command buffers,
623 * or until a timeout occurs. If a timeout occurs, the function will return
624 * -EBUSY.
625 */
626int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
627 unsigned long timeout)
628{
629 int ret;
630
631 ret = vmw_cmdbuf_cur_flush(man, interruptible);
632 vmw_generic_waiter_add(man->dev_priv,
633 SVGA_IRQFLAG_COMMAND_BUFFER,
634 &man->dev_priv->cmdbuf_waiters);
635
636 if (interruptible) {
637 ret = wait_event_interruptible_timeout
638 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
639 timeout);
640 } else {
641 ret = wait_event_timeout
642 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
643 timeout);
644 }
645 vmw_generic_waiter_remove(man->dev_priv,
646 SVGA_IRQFLAG_COMMAND_BUFFER,
647 &man->dev_priv->cmdbuf_waiters);
648 if (ret == 0) {
649 if (!vmw_cmdbuf_man_idle(man, true))
650 ret = -EBUSY;
651 else
652 ret = 0;
653 }
654 if (ret > 0)
655 ret = 0;
656
657 return ret;
658}
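
A hedged usage sketch: drain all pending command buffers before teardown, waiting at most ten seconds without interruption, which is the same pattern the pool and manager teardown paths below use:

	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
		DRM_ERROR("Failed to idle the command buffer manager.\n");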
659
660/**
661 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
662 *
663 * @man: The command buffer manager.
664 * @info: Allocation info. Holds the size on entry and the allocated mm node
665 * on successful return.
666 *
667 * Try to allocate buffer space from the main pool. Returns true on success.
668 * If a fatal error was hit, the error code is returned in @info->ret.
669 */
670static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
671 struct vmw_cmdbuf_alloc_info *info)
672{
673 int ret;
674
675 if (info->done)
676 return true;
677
678 memset(info->node, 0, sizeof(*info->node));
679 spin_lock_bh(&man->lock);
680 ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
681 0, 0,
682 DRM_MM_SEARCH_DEFAULT,
683 DRM_MM_CREATE_DEFAULT);
684 spin_unlock_bh(&man->lock);
685 info->done = !ret;
686
687 return info->done;
688}
689
690/**
691 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
692 *
693 * @man: The command buffer manager.
694 * @node: Pointer to pre-allocated range-manager node.
695 * @size: The size of the allocation.
696 * @interruptible: Whether to sleep interruptibly while waiting for space.
697 *
698 * This function allocates buffer space from the main pool, and if there is
699 * no space currently available, it turns on IRQ handling and sleeps waiting
700 * for space to become available.
701 */
702static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
703 struct drm_mm_node *node,
704 size_t size,
705 bool interruptible)
706{
707 struct vmw_cmdbuf_alloc_info info;
708
709 info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
710 info.node = node;
711 info.done = false;
712
713 /*
714	 * To prevent starvation of large requests, allow only one allocating
715	 * call at a time to wait for space.
716 */
717 if (interruptible) {
718 if (mutex_lock_interruptible(&man->space_mutex))
719 return -ERESTARTSYS;
720 } else {
721 mutex_lock(&man->space_mutex);
722 }
723
724 /* Try to allocate space without waiting. */
725 if (vmw_cmdbuf_try_alloc(man, &info))
726 goto out_unlock;
727
728 vmw_generic_waiter_add(man->dev_priv,
729 SVGA_IRQFLAG_COMMAND_BUFFER,
730 &man->dev_priv->cmdbuf_waiters);
731
732 if (interruptible) {
733 int ret;
734
735 ret = wait_event_interruptible
736 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
737 if (ret) {
738 vmw_generic_waiter_remove
739 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
740 &man->dev_priv->cmdbuf_waiters);
741 mutex_unlock(&man->space_mutex);
742 return ret;
743 }
744 } else {
745 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
746 }
747 vmw_generic_waiter_remove(man->dev_priv,
748 SVGA_IRQFLAG_COMMAND_BUFFER,
749 &man->dev_priv->cmdbuf_waiters);
750
751out_unlock:
752 mutex_unlock(&man->space_mutex);
753
754 return 0;
755}
756
757/**
758 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
759 * space from the main pool.
760 *
761 * @man: The command buffer manager.
762 * @header: Pointer to the header to set up.
763 * @size: The requested size of the buffer space.
765 * @interruptible: Whether to sleep interruptibly while waiting for space.
765 */
766static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
767 struct vmw_cmdbuf_header *header,
768 size_t size,
769 bool interruptible)
770{
771 SVGACBHeader *cb_hdr;
772 size_t offset;
773 int ret;
774
775 if (!man->has_pool)
776 return -ENOMEM;
777
778 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
779
780 if (ret)
781 return ret;
782
783 header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
784 &header->handle);
785 if (!header->cb_header) {
786 ret = -ENOMEM;
787 goto out_no_cb_header;
788 }
789
790 header->size = header->node.size << PAGE_SHIFT;
791 cb_hdr = header->cb_header;
792 offset = header->node.start << PAGE_SHIFT;
793 header->cmd = man->map + offset;
794 memset(cb_hdr, 0, sizeof(*cb_hdr));
795 if (man->using_mob) {
796 cb_hdr->flags = SVGA_CB_FLAG_MOB;
797 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
798 cb_hdr->ptr.mob.mobOffset = offset;
799 } else {
800 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
801 }
802
803 return 0;
804
805out_no_cb_header:
806 spin_lock_bh(&man->lock);
807 drm_mm_remove_node(&header->node);
808 spin_unlock_bh(&man->lock);
809
810 return ret;
811}
812
813/**
814 * vmw_cmdbuf_space_inline - Set up a command buffer header with
815 * inline command buffer space.
816 *
817 * @man: The command buffer manager.
818 * @header: Pointer to the header to set up.
819 * @size: The requested size of the buffer space.
820 */
821static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
822 struct vmw_cmdbuf_header *header,
823 int size)
824{
825 struct vmw_cmdbuf_dheader *dheader;
826 SVGACBHeader *cb_hdr;
827
828 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
829 return -ENOMEM;
830
831 dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
832 &header->handle);
833 if (!dheader)
834 return -ENOMEM;
835
836 header->inline_space = true;
837 header->size = VMW_CMDBUF_INLINE_SIZE;
838 cb_hdr = &dheader->cb_header;
839 header->cb_header = cb_hdr;
840 header->cmd = dheader->cmd;
841 memset(dheader, 0, sizeof(*dheader));
842 cb_hdr->status = SVGA_CB_STATUS_NONE;
843 cb_hdr->flags = SVGA_CB_FLAG_NONE;
844 cb_hdr->ptr.pa = (u64)header->handle +
845 (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
846
847 return 0;
848}
849
850/**
851 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
852 * command buffer space.
853 *
854 * @man: The command buffer manager.
855 * @size: The requested size of the buffer space.
856 * @interruptible: Whether to sleep interruptibly while waiting for space.
857 * @p_header: Points to a header pointer to populate on successful return.
858 *
859 * Returns a pointer to command buffer space if successful. Otherwise
860 * returns an error pointer. The header pointer returned in @p_header should
861 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
862 */
863void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
864 size_t size, bool interruptible,
865 struct vmw_cmdbuf_header **p_header)
866{
867 struct vmw_cmdbuf_header *header;
868 int ret = 0;
869
870 *p_header = NULL;
871
872 header = kzalloc(sizeof(*header), GFP_KERNEL);
873 if (!header)
874 return ERR_PTR(-ENOMEM);
875
876 if (size <= VMW_CMDBUF_INLINE_SIZE)
877 ret = vmw_cmdbuf_space_inline(man, header, size);
878 else
879 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
880
881 if (ret) {
882 kfree(header);
883 return ERR_PTR(ret);
884 }
885
886 header->man = man;
887 INIT_LIST_HEAD(&header->list);
888 header->cb_header->status = SVGA_CB_STATUS_NONE;
889 *p_header = header;
890
891 return header->cmd;
892}
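
A hedged usage sketch of the alloc/reserve/commit flow the comment above describes. "struct hypothetical_cmd" is a stand-in for a real SVGA3dCmdHeader plus command body, not a type from this patch:

	struct vmw_cmdbuf_header *header;
	struct hypothetical_cmd *cmd;

	cmd = vmw_cmdbuf_alloc(man, sizeof(*cmd), true, &header);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID, true,
				 header);
	if (IS_ERR(cmd)) {
		vmw_cmdbuf_header_free(header);
		return PTR_ERR(cmd);
	}

	/* ... fill in *cmd ... */

	vmw_cmdbuf_commit(man, sizeof(*cmd), header, true);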
893
894/**
895 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
896 * command buffer.
897 *
898 * @man: The command buffer manager.
899 * @size: The requested size of the commands.
900 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
901 * @interruptible: Whether to sleep interruptibly while waiting for space.
902 *
903 * Returns a pointer to command buffer space if successful. Otherwise
904 * returns an error pointer.
905 */
906static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
907 size_t size,
908 int ctx_id,
909 bool interruptible)
910{
911 struct vmw_cmdbuf_header *cur;
912 void *ret;
913
914 if (vmw_cmdbuf_cur_lock(man, interruptible))
915 return ERR_PTR(-ERESTARTSYS);
916
917 cur = man->cur;
918 if (cur && (size + man->cur_pos > cur->size ||
919 ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
920 ctx_id != cur->cb_header->dxContext)))
921 __vmw_cmdbuf_cur_flush(man);
922
923 if (!man->cur) {
924 ret = vmw_cmdbuf_alloc(man,
925 max_t(size_t, size, man->default_size),
926 interruptible, &man->cur);
927 if (IS_ERR(ret)) {
928 vmw_cmdbuf_cur_unlock(man);
929 return ret;
930 }
931
932 cur = man->cur;
933 }
934
935 if (ctx_id != SVGA3D_INVALID_ID) {
936 cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
937 cur->cb_header->dxContext = ctx_id;
938 }
939
940 cur->reserved = size;
941
942 return (void *) (man->cur->cmd + man->cur_pos);
943}
944
945/**
946 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
947 *
948 * @man: The command buffer manager.
949 * @size: The size of the commands actually written.
950 * @flush: Whether to flush the command buffer immediately.
951 */
952static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
953 size_t size, bool flush)
954{
955 struct vmw_cmdbuf_header *cur = man->cur;
956
957 WARN_ON(!mutex_is_locked(&man->cur_mutex));
958
959 WARN_ON(size > cur->reserved);
960 man->cur_pos += size;
961 if (!size)
962 cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
963 if (flush)
964 __vmw_cmdbuf_cur_flush(man);
965 vmw_cmdbuf_cur_unlock(man);
966}
967
968/**
969 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
970 *
971 * @man: The command buffer manager.
972 * @size: The requested size of the commands.
973 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
974 * @interruptible: Whether to sleep interruptibly while waiting for space.
975 * @header: Header of the command buffer. NULL if the current command buffer
976 * should be used.
977 *
978 * Returns a pointer to command buffer space if successful. Otherwise
979 * returns an error pointer.
980 */
981void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
982 int ctx_id, bool interruptible,
983 struct vmw_cmdbuf_header *header)
984{
985 if (!header)
986 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
987
988 if (size > header->size)
989 return ERR_PTR(-EINVAL);
990
991 if (ctx_id != SVGA3D_INVALID_ID) {
992 header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
993 header->cb_header->dxContext = ctx_id;
994 }
995
996 header->reserved = size;
997 return header->cmd;
998}
999
1000/**
1001 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1002 *
1003 * @man: The command buffer manager.
1004 * @size: The size of the commands actually written.
1005 * @header: Header of the command buffer. NULL if the current command buffer
1006 * should be used.
1007 * @flush: Whether to flush the command buffer immediately.
1008 */
1009void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1010 struct vmw_cmdbuf_header *header, bool flush)
1011{
1012 if (!header) {
1013 vmw_cmdbuf_commit_cur(man, size, flush);
1014 return;
1015 }
1016
1017 (void) vmw_cmdbuf_cur_lock(man, false);
1018 __vmw_cmdbuf_cur_flush(man);
1019 WARN_ON(size > header->reserved);
1020 man->cur = header;
1021 man->cur_pos = size;
1022 if (!size)
1023 header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1024 if (flush)
1025 __vmw_cmdbuf_cur_flush(man);
1026 vmw_cmdbuf_cur_unlock(man);
1027}
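
A hedged sketch of the small-submission path, where @header is NULL and commands go through the manager's current buffer; flushing is left to the manager unless explicitly requested:

	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, NULL);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... write at most @size bytes of command data at @cmd ... */

	vmw_cmdbuf_commit(man, size, NULL, false);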
1028
1029/**
1030 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1031 *
1032 * @man: The command buffer manager.
1033 */
1034void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1035{
1036 if (!man)
1037 return;
1038
1039 tasklet_schedule(&man->tasklet);
1040}
1041
1042/**
1043 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1044 *
1045 * @man: The command buffer manager.
1046 * @command: Pointer to the command to send.
1047 * @size: Size of the command.
1048 *
1049 * Synchronously sends a device context command.
1050 */
1051static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1052 const void *command,
1053 size_t size)
1054{
1055 struct vmw_cmdbuf_header *header;
1056 int status;
1057 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1058
1059 if (IS_ERR(cmd))
1060 return PTR_ERR(cmd);
1061
1062 memcpy(cmd, command, size);
1063 header->cb_header->length = size;
1064 header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1065 spin_lock_bh(&man->lock);
1066 status = vmw_cmdbuf_header_submit(header);
1067 spin_unlock_bh(&man->lock);
1068 vmw_cmdbuf_header_free(header);
1069
1070 if (status != SVGA_CB_STATUS_COMPLETED) {
1071 DRM_ERROR("Device context command failed with status %d\n",
1072 status);
1073 return -EINVAL;
1074 }
1075
1076 return 0;
1077}
1078
1079/**
1080 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1081 * context.
1082 *
1083 * @man: The command buffer manager.
1084 * @enable: Whether to enable or disable the context.
1085 *
1086 * Synchronously sends a device start / stop context command.
1087 */
1088static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
1089 bool enable)
1090{
1091 struct {
1092 uint32 id;
1093 SVGADCCmdStartStop body;
1094 } __packed cmd;
1095
1096 cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1097 cmd.body.enable = (enable) ? 1 : 0;
1098 cmd.body.context = SVGA_CB_CONTEXT_0;
1099
1100 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1101}
1102
1103/**
1104 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1105 *
1106 * @man: The command buffer manager.
1107 * @size: The size of the main space pool.
1108 * @default_size: The default size of the command buffer for small kernel
1109 * submissions.
1110 *
1111 * Set the size and allocate the main command buffer space pool,
1112 * as well as the default size of the command buffer for
1113 * small kernel submissions. If successful, this enables large command
1114 * submissions. Note that this function requires that rudimentary command
1115 * submission is already available and that the MOB memory manager is alive.
1116 * Returns 0 on success. Negative error code on failure.
1117 */
1118int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1119 size_t size, size_t default_size)
1120{
1121 struct vmw_private *dev_priv = man->dev_priv;
1122 bool dummy;
1123 int ret;
1124
1125 if (man->has_pool)
1126 return -EINVAL;
1127
1128 /* First, try to allocate a huge chunk of DMA memory */
1129 size = PAGE_ALIGN(size);
1130 man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1131 &man->handle, GFP_KERNEL);
1132 if (man->map) {
1133 man->using_mob = false;
1134 } else {
1135 /*
1136 * DMA memory failed. If we can have command buffers in a
1137 * MOB, try to use that instead. Note that this will
1138 * actually call into the already enabled manager, when
1139 * binding the MOB.
1140 */
1141 if (!(dev_priv->capabilities & SVGA_CAP_DX))
1142 return -ENOMEM;
1143
1144 ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1145 &vmw_mob_ne_placement, 0, false, NULL,
1146 &man->cmd_space);
1147 if (ret)
1148 return ret;
1149
1150 man->using_mob = true;
1151 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1152 &man->map_obj);
1153 if (ret)
1154 goto out_no_map;
1155
1156 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1157 }
1158
1159 man->size = size;
1160 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1161
1162 man->has_pool = true;
1163 man->default_size = default_size;
1164 DRM_INFO("Using command buffers with %s pool.\n",
1165 (man->using_mob) ? "MOB" : "DMA");
1166
1167 return 0;
1168
1169out_no_map:
1170 if (man->using_mob)
1171 ttm_bo_unref(&man->cmd_space);
1172
1173 return ret;
1174}
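
A hedged call sketch: once rudimentary command submission works and the MOB memory manager is up, a driver would enable the large pool. The sizes below are illustrative only, not values taken from this patch:

	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024, 32 * 1024);
	if (ret)
		DRM_INFO("Large command submissions are unavailable.\n");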
1175
1176/**
1177 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
1178 * inline command buffer submissions only.
1179 *
1180 * @dev_priv: Pointer to device private structure.
1181 *
1182 * Returns a pointer to a command buffer manager on success or an error
1183 * pointer on failure. The command buffer manager will be enabled for
1184 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
1185 */
1186struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1187{
1188 struct vmw_cmdbuf_man *man;
1189 struct vmw_cmdbuf_context *ctx;
1190 int i;
1191 int ret;
1192
1193 if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1194 return ERR_PTR(-ENOSYS);
1195
1196 man = kzalloc(sizeof(*man), GFP_KERNEL);
1197 if (!man)
1198 return ERR_PTR(-ENOMEM);
1199
1200 man->headers = dma_pool_create("vmwgfx cmdbuf",
1201 &dev_priv->dev->pdev->dev,
1202 sizeof(SVGACBHeader),
1203 64, PAGE_SIZE);
1204 if (!man->headers) {
1205 ret = -ENOMEM;
1206 goto out_no_pool;
1207 }
1208
1209 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1210 &dev_priv->dev->pdev->dev,
1211 sizeof(struct vmw_cmdbuf_dheader),
1212 64, PAGE_SIZE);
1213 if (!man->dheaders) {
1214 ret = -ENOMEM;
1215 goto out_no_dpool;
1216 }
1217
1218 for_each_cmdbuf_ctx(man, i, ctx)
1219 vmw_cmdbuf_ctx_init(ctx);
1220
1221 INIT_LIST_HEAD(&man->error);
1222 spin_lock_init(&man->lock);
1223 mutex_init(&man->cur_mutex);
1224 mutex_init(&man->space_mutex);
1225 tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1226 (unsigned long) man);
1227 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1228 init_waitqueue_head(&man->alloc_queue);
1229 init_waitqueue_head(&man->idle_queue);
1230 man->dev_priv = dev_priv;
1231 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1232 INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1233 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1234 &dev_priv->error_waiters);
1235 ret = vmw_cmdbuf_startstop(man, true);
1236 if (ret) {
1237 DRM_ERROR("Failed starting command buffer context 0.\n");
1238 vmw_cmdbuf_man_destroy(man);
1239 return ERR_PTR(ret);
1240 }
1241
1242 return man;
1243
1244out_no_dpool:
1245 dma_pool_destroy(man->headers);
1246out_no_pool:
1247 kfree(man);
1248
1249 return ERR_PTR(ret);
1250}
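
A hedged bring-up sketch, assuming the usual driver-load ordering rather than anything mandated by this patch:

	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);

	if (IS_ERR(man))
		return PTR_ERR(man); /* -ENOSYS: no command buffer support. */

	/*
	 * Only VMW_CMDBUF_INLINE_SIZE submissions work until
	 * vmw_cmdbuf_set_pool_size() succeeds.
	 */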
1251
1252/**
1253 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1254 *
1255 * @man: Pointer to a command buffer manager.
1256 *
1257 * This function removes the main buffer space pool, and should be called
1258 * before MOB memory management is removed. When this function has been called,
1259 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1260 * less are allowed, and the default size of the command buffer for small kernel
1261 * submissions is also set to this size.
1262 */
1263void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1264{
1265 if (!man->has_pool)
1266 return;
1267
1268 man->has_pool = false;
1269 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1270 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1271 if (man->using_mob) {
1272 (void) ttm_bo_kunmap(&man->map_obj);
1273 ttm_bo_unref(&man->cmd_space);
1274 } else {
1275 dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1276 man->size, man->map, man->handle);
1277 }
1278}
1279
1280/**
1281 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1282 *
1283 * @man: Pointer to a command buffer manager.
1284 *
1285 * This function idles and then destroys a command buffer manager.
1286 */
1287void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1288{
1289 WARN_ON_ONCE(man->has_pool);
1290 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1291 if (vmw_cmdbuf_startstop(man, false))
1292 DRM_ERROR("Failed stopping command buffer context 0.\n");
1293
1294 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1295 &man->dev_priv->error_waiters);
1296 tasklet_kill(&man->tasklet);
1297 (void) cancel_work_sync(&man->work);
1298 dma_pool_destroy(man->dheaders);
1299 dma_pool_destroy(man->headers);
1300 mutex_destroy(&man->cur_mutex);
1301 mutex_destroy(&man->space_mutex);
1302 kfree(man);
1303}
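
A hedged teardown sketch showing the ordering the comments above require: the pool must be removed while MOB memory management is still alive, and the manager is destroyed last:

	vmw_cmdbuf_remove_pool(man);
	/* ... tear down MOB memory management ... */
	vmw_cmdbuf_man_destroy(man);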
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f8dad0..13db8a2851ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h"
29 30
30#define VMW_CMDBUF_RES_MAN_HT_ORDER 12 31#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
31 32
32enum vmw_cmdbuf_res_state {
33 VMW_CMDBUF_RES_COMMITED,
34 VMW_CMDBUF_RES_ADD,
35 VMW_CMDBUF_RES_DEL
36};
37
38/** 33/**
39 * struct vmw_cmdbuf_res - Command buffer managed resource entry. 34 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
40 * 35 *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
132 127
133 list_for_each_entry_safe(entry, next, list, head) { 128 list_for_each_entry_safe(entry, next, list, head) {
134 list_del(&entry->head); 129 list_del(&entry->head);
130 if (entry->res->func->commit_notify)
131 entry->res->func->commit_notify(entry->res,
132 entry->state);
135 switch (entry->state) { 133 switch (entry->state) {
136 case VMW_CMDBUF_RES_ADD: 134 case VMW_CMDBUF_RES_ADD:
137 entry->state = VMW_CMDBUF_RES_COMMITED; 135 entry->state = VMW_CMDBUF_RES_COMMITTED;
138 list_add_tail(&entry->head, &entry->man->list); 136 list_add_tail(&entry->head, &entry->man->list);
139 break; 137 break;
140 case VMW_CMDBUF_RES_DEL: 138 case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
175 &entry->hash); 173 &entry->hash);
176 list_del(&entry->head); 174 list_del(&entry->head);
177 list_add_tail(&entry->head, &entry->man->list); 175 list_add_tail(&entry->head, &entry->man->list);
178 entry->state = VMW_CMDBUF_RES_COMMITED; 176 entry->state = VMW_CMDBUF_RES_COMMITTED;
179 break; 177 break;
180 default: 178 default:
181 BUG(); 179 BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
231 * @res_type: The resource type. 229 * @res_type: The resource type.
232 * @user_key: The user-space id of the resource. 230 * @user_key: The user-space id of the resource.
233 * @list: The staging list. 231 * @list: The staging list.
232 * @res_p: If the resource is in an already committed state, points to the
233 * struct vmw_resource on successful return. The pointer will be
234 * non ref-counted.
234 * 235 *
235 * This function looks up the struct vmw_cmdbuf_res entry from the manager 236 * This function looks up the struct vmw_cmdbuf_res entry from the manager
236 * hash table and, if it exists, removes it. Depending on its current staging 237 * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
240int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, 241int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
241 enum vmw_cmdbuf_res_type res_type, 242 enum vmw_cmdbuf_res_type res_type,
242 u32 user_key, 243 u32 user_key,
243 struct list_head *list) 244 struct list_head *list,
245 struct vmw_resource **res_p)
244{ 246{
245 struct vmw_cmdbuf_res *entry; 247 struct vmw_cmdbuf_res *entry;
246 struct drm_hash_item *hash; 248 struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
256 switch (entry->state) { 258 switch (entry->state) {
257 case VMW_CMDBUF_RES_ADD: 259 case VMW_CMDBUF_RES_ADD:
258 vmw_cmdbuf_res_free(man, entry); 260 vmw_cmdbuf_res_free(man, entry);
261 *res_p = NULL;
259 break; 262 break;
260 case VMW_CMDBUF_RES_COMMITED: 263 case VMW_CMDBUF_RES_COMMITTED:
261 (void) drm_ht_remove_item(&man->resources, &entry->hash); 264 (void) drm_ht_remove_item(&man->resources, &entry->hash);
262 list_del(&entry->head); 265 list_del(&entry->head);
263 entry->state = VMW_CMDBUF_RES_DEL; 266 entry->state = VMW_CMDBUF_RES_DEL;
264 list_add_tail(&entry->head, list); 267 list_add_tail(&entry->head, list);
268 *res_p = entry->res;
265 break; 269 break;
266 default: 270 default:
267 BUG(); 271 BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 44e6ecba3de7..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "vmwgfx_binding.h"
30#include "ttm/ttm_placement.h" 31#include "ttm/ttm_placement.h"
31 32
32struct vmw_user_context { 33struct vmw_user_context {
33 struct ttm_base_object base; 34 struct ttm_base_object base;
34 struct vmw_resource res; 35 struct vmw_resource res;
35 struct vmw_ctx_binding_state cbs; 36 struct vmw_ctx_binding_state *cbs;
36 struct vmw_cmdbuf_res_manager *man; 37 struct vmw_cmdbuf_res_manager *man;
38 struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
39 spinlock_t cotable_lock;
40 struct vmw_dma_buffer *dx_query_mob;
37}; 41};
38 42
39
40
41typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
42
43static void vmw_user_context_free(struct vmw_resource *res); 43static void vmw_user_context_free(struct vmw_resource *res);
44static struct vmw_resource * 44static struct vmw_resource *
45vmw_user_context_base_to_res(struct ttm_base_object *base); 45vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
51 bool readback, 51 bool readback,
52 struct ttm_validate_buffer *val_buf); 52 struct ttm_validate_buffer *val_buf);
53static int vmw_gb_context_destroy(struct vmw_resource *res); 53static int vmw_gb_context_destroy(struct vmw_resource *res);
54static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); 54static int vmw_dx_context_create(struct vmw_resource *res);
55static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, 55static int vmw_dx_context_bind(struct vmw_resource *res,
56 bool rebind); 56 struct ttm_validate_buffer *val_buf);
57static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); 57static int vmw_dx_context_unbind(struct vmw_resource *res,
58static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); 58 bool readback,
59static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); 59 struct ttm_validate_buffer *val_buf);
60static int vmw_dx_context_destroy(struct vmw_resource *res);
61
60static uint64_t vmw_user_context_size; 62static uint64_t vmw_user_context_size;
61 63
62static const struct vmw_user_resource_conv user_context_conv = { 64static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
93 .unbind = vmw_gb_context_unbind 95 .unbind = vmw_gb_context_unbind
94}; 96};
95 97
96static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { 98static const struct vmw_res_func vmw_dx_context_func = {
97 [vmw_ctx_binding_shader] = vmw_context_scrub_shader, 99 .res_type = vmw_res_dx_context,
98 [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, 100 .needs_backup = true,
99 [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; 101 .may_evict = true,
102 .type_name = "dx contexts",
103 .backup_placement = &vmw_mob_placement,
104 .create = vmw_dx_context_create,
105 .destroy = vmw_dx_context_destroy,
106 .bind = vmw_dx_context_bind,
107 .unbind = vmw_dx_context_unbind
108};
100 109
101/** 110/**
102 * Context management: 111 * Context management:
103 */ 112 */
104 113
114static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
115{
116 struct vmw_resource *res;
117 int i;
118
119 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
120 spin_lock(&uctx->cotable_lock);
121 res = uctx->cotables[i];
122 uctx->cotables[i] = NULL;
123 spin_unlock(&uctx->cotable_lock);
124
125 if (res)
126 vmw_resource_unreference(&res);
127 }
128}
129
105static void vmw_hw_context_destroy(struct vmw_resource *res) 130static void vmw_hw_context_destroy(struct vmw_resource *res)
106{ 131{
107 struct vmw_user_context *uctx = 132 struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
113 } *cmd; 138 } *cmd;
114 139
115 140
116 if (res->func->destroy == vmw_gb_context_destroy) { 141 if (res->func->destroy == vmw_gb_context_destroy ||
142 res->func->destroy == vmw_dx_context_destroy) {
117 mutex_lock(&dev_priv->cmdbuf_mutex); 143 mutex_lock(&dev_priv->cmdbuf_mutex);
118 vmw_cmdbuf_res_man_destroy(uctx->man); 144 vmw_cmdbuf_res_man_destroy(uctx->man);
119 mutex_lock(&dev_priv->binding_mutex); 145 mutex_lock(&dev_priv->binding_mutex);
120 (void) vmw_context_binding_state_kill(&uctx->cbs); 146 vmw_binding_state_kill(uctx->cbs);
121 (void) vmw_gb_context_destroy(res); 147 (void) res->func->destroy(res);
122 mutex_unlock(&dev_priv->binding_mutex); 148 mutex_unlock(&dev_priv->binding_mutex);
123 if (dev_priv->pinned_bo != NULL && 149 if (dev_priv->pinned_bo != NULL &&
124 !dev_priv->query_cid_valid) 150 !dev_priv->query_cid_valid)
125 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 151 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
126 mutex_unlock(&dev_priv->cmdbuf_mutex); 152 mutex_unlock(&dev_priv->cmdbuf_mutex);
153 vmw_context_cotables_unref(uctx);
127 return; 154 return;
128 } 155 }
129 156
@@ -135,25 +162,29 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
135 return; 162 return;
136 } 163 }
137 164
138 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); 165 cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
139 cmd->header.size = cpu_to_le32(sizeof(cmd->body)); 166 cmd->header.size = sizeof(cmd->body);
140 cmd->body.cid = cpu_to_le32(res->id); 167 cmd->body.cid = res->id;
141 168
142 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 169 vmw_fifo_commit(dev_priv, sizeof(*cmd));
143 vmw_3d_resource_dec(dev_priv, false); 170 vmw_fifo_resource_dec(dev_priv);
144} 171}
145 172
146static int vmw_gb_context_init(struct vmw_private *dev_priv, 173static int vmw_gb_context_init(struct vmw_private *dev_priv,
174 bool dx,
147 struct vmw_resource *res, 175 struct vmw_resource *res,
148 void (*res_free) (struct vmw_resource *res)) 176 void (*res_free)(struct vmw_resource *res))
149{ 177{
150 int ret; 178 int ret, i;
151 struct vmw_user_context *uctx = 179 struct vmw_user_context *uctx =
152 container_of(res, struct vmw_user_context, res); 180 container_of(res, struct vmw_user_context, res);
153 181
182 res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
183 SVGA3D_CONTEXT_DATA_SIZE);
154 ret = vmw_resource_init(dev_priv, res, true, 184 ret = vmw_resource_init(dev_priv, res, true,
155 res_free, &vmw_gb_context_func); 185 res_free,
156 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 186 dx ? &vmw_dx_context_func :
187 &vmw_gb_context_func);
157 if (unlikely(ret != 0)) 188 if (unlikely(ret != 0))
158 goto out_err; 189 goto out_err;
159 190
@@ -166,12 +197,32 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
166 } 197 }
167 } 198 }
168 199
169 memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 200 uctx->cbs = vmw_binding_state_alloc(dev_priv);
170 INIT_LIST_HEAD(&uctx->cbs.list); 201 if (IS_ERR(uctx->cbs)) {
202 ret = PTR_ERR(uctx->cbs);
203 goto out_err;
204 }
205
206 spin_lock_init(&uctx->cotable_lock);
207
208 if (dx) {
209 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
210 uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
211 &uctx->res, i);
212 if (unlikely(uctx->cotables[i] == NULL)) {
213 ret = -ENOMEM;
214 goto out_cotables;
215 }
216 }
217 }
218
219
171 220
172 vmw_resource_activate(res, vmw_hw_context_destroy); 221 vmw_resource_activate(res, vmw_hw_context_destroy);
173 return 0; 222 return 0;
174 223
224out_cotables:
225 vmw_context_cotables_unref(uctx);
175out_err: 226out_err:
176 if (res_free) 227 if (res_free)
177 res_free(res); 228 res_free(res);
@@ -182,7 +233,8 @@ out_err:
182 233
183static int vmw_context_init(struct vmw_private *dev_priv, 234static int vmw_context_init(struct vmw_private *dev_priv,
184 struct vmw_resource *res, 235 struct vmw_resource *res,
185 void (*res_free) (struct vmw_resource *res)) 236 void (*res_free)(struct vmw_resource *res),
237 bool dx)
186{ 238{
187 int ret; 239 int ret;
188 240
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
192 } *cmd; 244 } *cmd;
193 245
194 if (dev_priv->has_mob) 246 if (dev_priv->has_mob)
195 return vmw_gb_context_init(dev_priv, res, res_free); 247 return vmw_gb_context_init(dev_priv, dx, res, res_free);
196 248
197 ret = vmw_resource_init(dev_priv, res, false, 249 ret = vmw_resource_init(dev_priv, res, false,
198 res_free, &vmw_legacy_context_func); 250 res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
215 return -ENOMEM; 267 return -ENOMEM;
216 } 268 }
217 269
218 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); 270 cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
219 cmd->header.size = cpu_to_le32(sizeof(cmd->body)); 271 cmd->header.size = sizeof(cmd->body);
220 cmd->body.cid = cpu_to_le32(res->id); 272 cmd->body.cid = res->id;
221 273
222 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 274 vmw_fifo_commit(dev_priv, sizeof(*cmd));
223 (void) vmw_3d_resource_inc(dev_priv, false); 275 vmw_fifo_resource_inc(dev_priv);
224 vmw_resource_activate(res, vmw_hw_context_destroy); 276 vmw_resource_activate(res, vmw_hw_context_destroy);
225 return 0; 277 return 0;
226 278
@@ -232,19 +284,10 @@ out_early:
232 return ret; 284 return ret;
233} 285}
234 286
235struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
236{
237 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
238 int ret;
239
240 if (unlikely(res == NULL))
241 return NULL;
242
243 ret = vmw_context_init(dev_priv, res, NULL);
244
245 return (ret == 0) ? res : NULL;
246}
247 287
288/*
289 * GB context.
290 */
248 291
249static int vmw_gb_context_create(struct vmw_resource *res) 292static int vmw_gb_context_create(struct vmw_resource *res)
250{ 293{
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
281 cmd->header.size = sizeof(cmd->body); 324 cmd->header.size = sizeof(cmd->body);
282 cmd->body.cid = res->id; 325 cmd->body.cid = res->id;
283 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 326 vmw_fifo_commit(dev_priv, sizeof(*cmd));
284 (void) vmw_3d_resource_inc(dev_priv, false); 327 vmw_fifo_resource_inc(dev_priv);
285 328
286 return 0; 329 return 0;
287 330
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
309 "binding.\n"); 352 "binding.\n");
310 return -ENOMEM; 353 return -ENOMEM;
311 } 354 }
312
313 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 355 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
314 cmd->header.size = sizeof(cmd->body); 356 cmd->header.size = sizeof(cmd->body);
315 cmd->body.cid = res->id; 357 cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
346 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 388 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
347 389
348 mutex_lock(&dev_priv->binding_mutex); 390 mutex_lock(&dev_priv->binding_mutex);
349 vmw_context_binding_state_scrub(&uctx->cbs); 391 vmw_binding_state_scrub(uctx->cbs);
350 392
351 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 393 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
352 394
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
414 if (dev_priv->query_cid == res->id) 456 if (dev_priv->query_cid == res->id)
415 dev_priv->query_cid_valid = false; 457 dev_priv->query_cid_valid = false;
416 vmw_resource_release_id(res); 458 vmw_resource_release_id(res);
417 vmw_3d_resource_dec(dev_priv, false); 459 vmw_fifo_resource_dec(dev_priv);
460
461 return 0;
462}
463
464/*
465 * DX context.
466 */
467
468static int vmw_dx_context_create(struct vmw_resource *res)
469{
470 struct vmw_private *dev_priv = res->dev_priv;
471 int ret;
472 struct {
473 SVGA3dCmdHeader header;
474 SVGA3dCmdDXDefineContext body;
475 } *cmd;
476
477 if (likely(res->id != -1))
478 return 0;
479
480 ret = vmw_resource_alloc_id(res);
481 if (unlikely(ret != 0)) {
482 DRM_ERROR("Failed to allocate a context id.\n");
483 goto out_no_id;
484 }
485
486 if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
487 ret = -EBUSY;
488 goto out_no_fifo;
489 }
490
491 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
492 if (unlikely(cmd == NULL)) {
493 DRM_ERROR("Failed reserving FIFO space for context "
494 "creation.\n");
495 ret = -ENOMEM;
496 goto out_no_fifo;
497 }
498
499 cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
500 cmd->header.size = sizeof(cmd->body);
501 cmd->body.cid = res->id;
502 vmw_fifo_commit(dev_priv, sizeof(*cmd));
503 vmw_fifo_resource_inc(dev_priv);
504
505 return 0;
506
507out_no_fifo:
508 vmw_resource_release_id(res);
509out_no_id:
510 return ret;
511}
512
513static int vmw_dx_context_bind(struct vmw_resource *res,
514 struct ttm_validate_buffer *val_buf)
515{
516 struct vmw_private *dev_priv = res->dev_priv;
517 struct {
518 SVGA3dCmdHeader header;
519 SVGA3dCmdDXBindContext body;
520 } *cmd;
521 struct ttm_buffer_object *bo = val_buf->bo;
522
523 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
524
525 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
526 if (unlikely(cmd == NULL)) {
527 DRM_ERROR("Failed reserving FIFO space for context "
528 "binding.\n");
529 return -ENOMEM;
530 }
531
532 cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
533 cmd->header.size = sizeof(cmd->body);
534 cmd->body.cid = res->id;
535 cmd->body.mobid = bo->mem.start;
536 cmd->body.validContents = res->backup_dirty;
537 res->backup_dirty = false;
538 vmw_fifo_commit(dev_priv, sizeof(*cmd));
539
540
541 return 0;
542}
543
544/**
545 * vmw_dx_context_scrub_cotables - Scrub all bindings and
546 * cotables from a context
547 *
548 * @ctx: Pointer to the context resource
549 * @readback: Whether to save the cotable contents on scrubbing.
550 *
551 * COtables must be unbound before their context, but unbinding requires
552 * the backup buffer being reserved, whereas scrubbing does not.
553 * This function scrubs all cotables of a context, potentially reading back
554 * the contents into their backup buffers. However, scrubbing cotables
555 * also makes the device context invalid, so scrub all bindings first so
556 * that doesn't have to be done later with an invalid context.
557 */
558void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
559 bool readback)
560{
561 struct vmw_user_context *uctx =
562 container_of(ctx, struct vmw_user_context, res);
563 int i;
564
565 vmw_binding_state_scrub(uctx->cbs);
566 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
567 struct vmw_resource *res;
568
569 /* Avoid racing with ongoing cotable destruction. */
570 spin_lock(&uctx->cotable_lock);
571 res = uctx->cotables[vmw_cotable_scrub_order[i]];
572 if (res)
573 res = vmw_resource_reference_unless_doomed(res);
574 spin_unlock(&uctx->cotable_lock);
575 if (!res)
576 continue;
577
578 WARN_ON(vmw_cotable_scrub(res, readback));
579 vmw_resource_unreference(&res);
580 }
581}
582
583static int vmw_dx_context_unbind(struct vmw_resource *res,
584 bool readback,
585 struct ttm_validate_buffer *val_buf)
586{
587 struct vmw_private *dev_priv = res->dev_priv;
588 struct ttm_buffer_object *bo = val_buf->bo;
589 struct vmw_fence_obj *fence;
590 struct vmw_user_context *uctx =
591 container_of(res, struct vmw_user_context, res);
592
593 struct {
594 SVGA3dCmdHeader header;
595 SVGA3dCmdDXReadbackContext body;
596 } *cmd1;
597 struct {
598 SVGA3dCmdHeader header;
599 SVGA3dCmdDXBindContext body;
600 } *cmd2;
601 uint32_t submit_size;
602 uint8_t *cmd;
603
604
605 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
606
607 mutex_lock(&dev_priv->binding_mutex);
608 vmw_dx_context_scrub_cotables(res, readback);
609
610 if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
611 readback) {
612 WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
613 if (vmw_query_readback_all(uctx->dx_query_mob))
614 DRM_ERROR("Failed to read back query states\n");
615 }
616
617 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
618
619 cmd = vmw_fifo_reserve(dev_priv, submit_size);
620 if (unlikely(cmd == NULL)) {
621 DRM_ERROR("Failed reserving FIFO space for context "
622 "unbinding.\n");
623 mutex_unlock(&dev_priv->binding_mutex);
624 return -ENOMEM;
625 }
626
627 cmd2 = (void *) cmd;
628 if (readback) {
629 cmd1 = (void *) cmd;
630 cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
631 cmd1->header.size = sizeof(cmd1->body);
632 cmd1->body.cid = res->id;
633 cmd2 = (void *) (&cmd1[1]);
634 }
635 cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
636 cmd2->header.size = sizeof(cmd2->body);
637 cmd2->body.cid = res->id;
638 cmd2->body.mobid = SVGA3D_INVALID_ID;
639
640 vmw_fifo_commit(dev_priv, submit_size);
641 mutex_unlock(&dev_priv->binding_mutex);
642
643 /*
644 * Create a fence object and fence the backup buffer.
645 */
646
647 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
648 &fence, NULL);
649
650 vmw_fence_single_bo(bo, fence);
651
652 if (likely(fence != NULL))
653 vmw_fence_obj_unreference(&fence);
654
655 return 0;
656}
657
658static int vmw_dx_context_destroy(struct vmw_resource *res)
659{
660 struct vmw_private *dev_priv = res->dev_priv;
661 struct {
662 SVGA3dCmdHeader header;
663 SVGA3dCmdDXDestroyContext body;
664 } *cmd;
665
666 if (likely(res->id == -1))
667 return 0;
668
669 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
670 if (unlikely(cmd == NULL)) {
671 DRM_ERROR("Failed reserving FIFO space for context "
672 "destruction.\n");
673 return -ENOMEM;
674 }
675
676 cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
677 cmd->header.size = sizeof(cmd->body);
678 cmd->body.cid = res->id;
679 vmw_fifo_commit(dev_priv, sizeof(*cmd));
680 if (dev_priv->query_cid == res->id)
681 dev_priv->query_cid_valid = false;
682 vmw_resource_release_id(res);
683 vmw_fifo_resource_dec(dev_priv);
418 684
419 return 0; 685 return 0;
420} 686}
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
435 container_of(res, struct vmw_user_context, res); 701 container_of(res, struct vmw_user_context, res);
436 struct vmw_private *dev_priv = res->dev_priv; 702 struct vmw_private *dev_priv = res->dev_priv;
437 703
704 if (ctx->cbs)
705 vmw_binding_state_free(ctx->cbs);
706
707 (void) vmw_context_bind_dx_query(res, NULL);
708
438 ttm_base_object_kfree(ctx, base); 709 ttm_base_object_kfree(ctx, base);
439 ttm_mem_global_free(vmw_mem_glob(dev_priv), 710 ttm_mem_global_free(vmw_mem_glob(dev_priv),
440 vmw_user_context_size); 711 vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
465 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); 736 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
466} 737}
467 738
468int vmw_context_define_ioctl(struct drm_device *dev, void *data, 739static int vmw_context_define(struct drm_device *dev, void *data,
469 struct drm_file *file_priv) 740 struct drm_file *file_priv, bool dx)
470{ 741{
471 struct vmw_private *dev_priv = vmw_priv(dev); 742 struct vmw_private *dev_priv = vmw_priv(dev);
472 struct vmw_user_context *ctx; 743 struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
476 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 747 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
477 int ret; 748 int ret;
478 749
750 if (!dev_priv->has_dx && dx) {
751 DRM_ERROR("DX contexts not supported by device.\n");
752 return -EINVAL;
753 }
479 754
480 /* 755 /*
481 * Approximate idr memory usage with 128 bytes. It will be limited 756 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
516 * From here on, the destructor takes over resource freeing. 791 * From here on, the destructor takes over resource freeing.
517 */ 792 */
518 793
519 ret = vmw_context_init(dev_priv, res, vmw_user_context_free); 794 ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
520 if (unlikely(ret != 0)) 795 if (unlikely(ret != 0))
521 goto out_unlock; 796 goto out_unlock;
522 797
@@ -535,387 +810,128 @@ out_err:
535out_unlock: 810out_unlock:
536 ttm_read_unlock(&dev_priv->reservation_sem); 811 ttm_read_unlock(&dev_priv->reservation_sem);
537 return ret; 812 return ret;
538
539}
540
541/**
542 * vmw_context_scrub_shader - scrub a shader binding from a context.
543 *
544 * @bi: single binding information.
545 * @rebind: Whether to issue a bind instead of scrub command.
546 */
547static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
548{
549 struct vmw_private *dev_priv = bi->ctx->dev_priv;
550 struct {
551 SVGA3dCmdHeader header;
552 SVGA3dCmdSetShader body;
553 } *cmd;
554
555 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
556 if (unlikely(cmd == NULL)) {
557 DRM_ERROR("Failed reserving FIFO space for shader "
558 "unbinding.\n");
559 return -ENOMEM;
560 }
561
562 cmd->header.id = SVGA_3D_CMD_SET_SHADER;
563 cmd->header.size = sizeof(cmd->body);
564 cmd->body.cid = bi->ctx->id;
565 cmd->body.type = bi->i1.shader_type;
566 cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
567 vmw_fifo_commit(dev_priv, sizeof(*cmd));
568
569 return 0;
570}
571
572/**
573 * vmw_context_scrub_render_target - scrub a render target binding
574 * from a context.
575 *
576 * @bi: single binding information.
577 * @rebind: Whether to issue a bind instead of scrub command.
578 */
579static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
580 bool rebind)
581{
582 struct vmw_private *dev_priv = bi->ctx->dev_priv;
583 struct {
584 SVGA3dCmdHeader header;
585 SVGA3dCmdSetRenderTarget body;
586 } *cmd;
587
588 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
589 if (unlikely(cmd == NULL)) {
590 DRM_ERROR("Failed reserving FIFO space for render target "
591 "unbinding.\n");
592 return -ENOMEM;
593 }
594
595 cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
596 cmd->header.size = sizeof(cmd->body);
597 cmd->body.cid = bi->ctx->id;
598 cmd->body.type = bi->i1.rt_type;
599 cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
600 cmd->body.target.face = 0;
601 cmd->body.target.mipmap = 0;
602 vmw_fifo_commit(dev_priv, sizeof(*cmd));
603
604 return 0;
605} 813}
606 814
607/** 815int vmw_context_define_ioctl(struct drm_device *dev, void *data,
608 * vmw_context_scrub_texture - scrub a texture binding from a context. 816 struct drm_file *file_priv)
609 *
610 * @bi: single binding information.
611 * @rebind: Whether to issue a bind instead of scrub command.
612 *
613 * TODO: Possibly complement this function with a function that takes
614 * a list of texture bindings and combines them to a single command.
615 */
616static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
617 bool rebind)
618{
619 struct vmw_private *dev_priv = bi->ctx->dev_priv;
620 struct {
621 SVGA3dCmdHeader header;
622 struct {
623 SVGA3dCmdSetTextureState c;
624 SVGA3dTextureState s1;
625 } body;
626 } *cmd;
627
628 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
629 if (unlikely(cmd == NULL)) {
630 DRM_ERROR("Failed reserving FIFO space for texture "
631 "unbinding.\n");
632 return -ENOMEM;
633 }
634
635
636 cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
637 cmd->header.size = sizeof(cmd->body);
638 cmd->body.c.cid = bi->ctx->id;
639 cmd->body.s1.stage = bi->i1.texture_stage;
640 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
641 cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
642 vmw_fifo_commit(dev_priv, sizeof(*cmd));
643
644 return 0;
645}
646
647/**
648 * vmw_context_binding_drop: Stop tracking a context binding
649 *
650 * @cb: Pointer to binding tracker storage.
651 *
652 * Stops tracking a context binding, and re-initializes its storage.
653 * Typically used when the context binding is replaced with a binding to
654 * another (or the same, for that matter) resource.
655 */
656static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
657{
658 list_del(&cb->ctx_list);
659 if (!list_empty(&cb->res_list))
660  list_del(&cb->res_list);
661 cb->bi.ctx = NULL;
662}
663
664/**
665 * vmw_context_binding_add: Start tracking a context binding
666 *
667 * @cbs: Pointer to the context binding state tracker.
668 * @bi: Information about the binding to track.
669 *
670 * Performs basic checks on the binding to make sure arguments are within
671 * bounds and then starts tracking the binding in the context binding
672 * state structure @cbs.
673 */
674int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
675       const struct vmw_ctx_bindinfo *bi)
676{
677 struct vmw_ctx_binding *loc;
678
679 switch (bi->bt) {
680 case vmw_ctx_binding_rt:
681  if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
682   DRM_ERROR("Illegal render target type %u.\n",
683     (unsigned) bi->i1.rt_type);
684   return -EINVAL;
685  }
686  loc = &cbs->render_targets[bi->i1.rt_type];
687  break;
688 case vmw_ctx_binding_tex:
689  if (unlikely((unsigned)bi->i1.texture_stage >=
690        SVGA3D_NUM_TEXTURE_UNITS)) {
691   DRM_ERROR("Illegal texture/sampler unit %u.\n",
692     (unsigned) bi->i1.texture_stage);
693   return -EINVAL;
694  }
695  loc = &cbs->texture_units[bi->i1.texture_stage];
696  break;
697 case vmw_ctx_binding_shader:
698  if (unlikely((unsigned)bi->i1.shader_type >=
699        SVGA3D_SHADERTYPE_MAX)) {
700   DRM_ERROR("Illegal shader type %u.\n",
701     (unsigned) bi->i1.shader_type);
702   return -EINVAL;
703  }
704  loc = &cbs->shaders[bi->i1.shader_type];
705  break;
706 default:
707  BUG();
708 }
709
710 if (loc->bi.ctx != NULL)
711  vmw_context_binding_drop(loc);
712
713 loc->bi = *bi;
714 loc->bi.scrubbed = false;
715 list_add_tail(&loc->ctx_list, &cbs->list);
716 INIT_LIST_HEAD(&loc->res_list);
717
718 return 0;
719}
720
721/**
722 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
723 *
724 * @cbs: Pointer to the persistent context binding state tracker.
725 * @bi: Information about the binding to track.
726 *
727 */
728static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
729      const struct vmw_ctx_bindinfo *bi)
730{
731 struct vmw_ctx_binding *loc;
732
733 switch (bi->bt) {
734 case vmw_ctx_binding_rt:
735  loc = &cbs->render_targets[bi->i1.rt_type];
736  break;
737 case vmw_ctx_binding_tex:
738  loc = &cbs->texture_units[bi->i1.texture_stage];
739  break;
740 case vmw_ctx_binding_shader:
741  loc = &cbs->shaders[bi->i1.shader_type];
742  break;
743 default:
744  BUG();
745 }
746
747 if (loc->bi.ctx != NULL)
748  vmw_context_binding_drop(loc);
749
750 if (bi->res != NULL) {
751  loc->bi = *bi;
752  list_add_tail(&loc->ctx_list, &cbs->list);
753  list_add_tail(&loc->res_list, &bi->res->binding_head);
754 }
755}
756
757/**
758 * vmw_context_binding_kill - Kill a binding on the device
759 * and stop tracking it.
760 *
761 * @cb: Pointer to binding tracker storage.
762 *
763 * Emits FIFO commands to scrub a binding represented by @cb.
764 * Then stops tracking the binding and re-initializes its storage.
765 */
766static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
767{
768 if (!cb->bi.scrubbed) {
769  (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
770  cb->bi.scrubbed = true;
771 }
772 vmw_context_binding_drop(cb);
773}
774
775/**
776 * vmw_context_binding_state_kill - Kill all bindings associated with a
777 * struct vmw_ctx_binding state structure, and re-initialize the structure.
778 *
779 * @cbs: Pointer to the context binding state tracker.
780 *
781 * Emits commands to scrub all bindings associated with the
782 * context binding state tracker. Then re-initializes the whole structure.
783 */
784static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
785{
786 struct vmw_ctx_binding *entry, *next;
787
788 list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
789  vmw_context_binding_kill(entry);
790}
791
792/**
793 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
794 * struct vmw_ctx_binding state structure.
795 *
796 * @cbs: Pointer to the context binding state tracker.
797 *
798 * Emits commands to scrub all bindings associated with the
799 * context binding state tracker.
800 */
801static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
802{
803 struct vmw_ctx_binding *entry;
804
805 list_for_each_entry(entry, &cbs->list, ctx_list) {
806  if (!entry->bi.scrubbed) {
807   (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
808   entry->bi.scrubbed = true;
809  }
810 }
811}
812
813/**
814 * vmw_context_binding_res_list_kill - Kill all bindings on a
815 * resource binding list
816 *
817 * @head: list head of resource binding list
818 *
819 * Kills all bindings associated with a specific resource. Typically
820 * called before the resource is destroyed.
821 */
822void vmw_context_binding_res_list_kill(struct list_head *head)
823{
824 struct vmw_ctx_binding *entry, *next;
825
826 list_for_each_entry_safe(entry, next, head, res_list)
827  vmw_context_binding_kill(entry);
828}
829
830/**
831 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
832 * resource binding list
833 *
834 * @head: list head of resource binding list
835 *
836 * Scrub all bindings associated with a specific resource. Typically
837 * called before the resource is evicted.
838 */
839void vmw_context_binding_res_list_scrub(struct list_head *head)
840{
841 struct vmw_ctx_binding *entry;
842
843 list_for_each_entry(entry, head, res_list) {
844  if (!entry->bi.scrubbed) {
845   (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
846   entry->bi.scrubbed = true;
847  }
848 }
849}
850
851/**
852 * vmw_context_binding_state_transfer - Commit staged binding info
853 *
854 * @ctx: Pointer to context to commit the staged binding info to.
855 * @from: Staged binding info built during execbuf.
856 *
857 * Transfers binding info from a temporary structure to the persistent
858 * structure in the context. This can be done once commands
859 */
860void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
861     struct vmw_ctx_binding_state *from)
862{
863 struct vmw_user_context *uctx =
864  container_of(ctx, struct vmw_user_context, res);
865 struct vmw_ctx_binding *entry, *next;
866
867 list_for_each_entry_safe(entry, next, &from->list, ctx_list)
868  vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
869}
870
871/**
872 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
873 *
874 * @ctx: The context resource
875 *
876 * Walks through the context binding list and rebinds all scrubbed
877 * resources.
878 */
879int vmw_context_rebind_all(struct vmw_resource *ctx)
880{
881 struct vmw_ctx_binding *entry;
882 struct vmw_user_context *uctx =
883  container_of(ctx, struct vmw_user_context, res);
884 struct vmw_ctx_binding_state *cbs = &uctx->cbs;
885 int ret;
886
887 list_for_each_entry(entry, &cbs->list, ctx_list) {
888  if (likely(!entry->bi.scrubbed))
889   continue;
890
891  if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
892       SVGA3D_INVALID_ID))
893   continue;
894
895  ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
896  if (unlikely(ret != 0))
897   return ret;
898
899  entry->bi.scrubbed = false;
900 }
901
902 return 0;
903}
904
905/**
906 * vmw_context_binding_list - Return a list of context bindings
907 *
908 * @ctx: The context resource
909 *
910 * Returns the current list of bindings of the given context. Note that
911 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
912 */
913struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
914{
915 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
916}
917
918struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
919{
920 return container_of(ctx, struct vmw_user_context, res)->man;
921}
817{
818 return vmw_context_define(dev, data, file_priv, false);
819}
820
821int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
822          struct drm_file *file_priv)
823{
824 union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
825 struct drm_vmw_context_arg *rep = &arg->rep;
826
827 switch (arg->req) {
828 case drm_vmw_context_legacy:
829  return vmw_context_define(dev, rep, file_priv, false);
830 case drm_vmw_context_dx:
831  return vmw_context_define(dev, rep, file_priv, true);
832 default:
833  break;
834 }
835 return -EINVAL;
836}
837
838/**
839 * vmw_context_binding_list - Return a list of context bindings
840 *
841 * @ctx: The context resource
842 *
843 * Returns the current list of bindings of the given context. Note that
844 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
845 */
846struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
847{
848 struct vmw_user_context *uctx =
849  container_of(ctx, struct vmw_user_context, res);
850
851 return vmw_binding_state_list(uctx->cbs);
852}
853
854struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
855{
856 return container_of(ctx, struct vmw_user_context, res)->man;
857}
858
859struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
860      SVGACOTableType cotable_type)
861{
862 if (cotable_type >= SVGA_COTABLE_DX10_MAX)
863  return ERR_PTR(-EINVAL);
864
865 return vmw_resource_reference
866  (container_of(ctx, struct vmw_user_context, res)->
867   cotables[cotable_type]);
868}
869
870/**
871 * vmw_context_binding_state -
872 * Return a pointer to a context binding state structure
873 *
874 * @ctx: The context resource
875 *
876 * Returns the current state of bindings of the given context. Note that
877 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
878 */
879struct vmw_ctx_binding_state *
880vmw_context_binding_state(struct vmw_resource *ctx)
881{
882 return container_of(ctx, struct vmw_user_context, res)->cbs;
883}
884
885/**
886 * vmw_context_bind_dx_query -
887 * Sets query MOB for the context. If @mob is NULL, then this function will
888 * remove the association between the MOB and the context. This function
889 * assumes the binding_mutex is held.
890 *
891 * @ctx_res: The context resource
892 * @mob: a reference to the query MOB
893 *
894 * Returns -EINVAL if a MOB has already been set and does not match the one
895 * specified in the parameter. 0 otherwise.
896 */
897int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
898         struct vmw_dma_buffer *mob)
899{
900 struct vmw_user_context *uctx =
901  container_of(ctx_res, struct vmw_user_context, res);
902
903 if (mob == NULL) {
904  if (uctx->dx_query_mob) {
905   uctx->dx_query_mob->dx_query_ctx = NULL;
906   vmw_dmabuf_unreference(&uctx->dx_query_mob);
907   uctx->dx_query_mob = NULL;
908  }
909
910  return 0;
911 }
912
913 /* Can only have one MOB per context for queries */
914 if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
915  return -EINVAL;
916
917 mob->dx_query_ctx = ctx_res;
918
919 if (!uctx->dx_query_mob)
920  uctx->dx_query_mob = vmw_dmabuf_reference(mob);
921
922 return 0;
923}
924
925/**
926 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
927 *
928 * @ctx_res: The context resource
929 */
930struct vmw_dma_buffer *
931vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
932{
933 struct vmw_user_context *uctx =
934  container_of(ctx_res, struct vmw_user_context, res);
935
936 return uctx->dx_query_mob;
937}
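
The new DX query binding above is documented to require the binding mutex. A minimal, hypothetical caller might look like the sketch below; the helper name is invented and not part of this series.

/* Sketch: bind a query MOB to a DX context under binding_mutex, as the
 * kernel-doc for vmw_context_bind_dx_query() requires. Illustrative only. */
static int example_set_query_mob(struct vmw_private *dev_priv,
				 struct vmw_resource *ctx_res,
				 struct vmw_dma_buffer *mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_context_bind_dx_query(ctx_res, mob);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;	/* -EINVAL if a different MOB is already bound */
}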
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 000000000000..ce659a125f2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
1/**************************************************************************
2 *
3 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Treat context OTables as resources to make use of the resource
29 * backing MOB eviction mechanism, that is used to read back the COTable
30 * whenever the backing MOB is evicted.
31 */
32
33#include "vmwgfx_drv.h"
34#include "vmwgfx_resource_priv.h"
35#include <ttm/ttm_placement.h>
36#include "vmwgfx_so.h"
37
38/**
39 * struct vmw_cotable - Context Object Table resource
40 *
41 * @res: struct vmw_resource we are deriving from.
42 * @ctx: non-refcounted pointer to the owning context.
43 * @size_read_back: Size of data read back during eviction.
44 * @seen_entries: Seen entries in command stream for this cotable.
45 * @type: The cotable type.
46 * @scrubbed: Whether the cotable has been scrubbed.
47 * @resource_list: List of resources in the cotable.
48 */
49struct vmw_cotable {
50 struct vmw_resource res;
51 struct vmw_resource *ctx;
52 size_t size_read_back;
53 int seen_entries;
54 u32 type;
55 bool scrubbed;
56 struct list_head resource_list;
57};
58
59/**
60 * struct vmw_cotable_info - Static info about cotable types
61 *
62 * @min_initial_entries: Min number of initial entries at cotable allocation
63 * for this cotable type.
64 * @size: Size of each entry.
65 */
66struct vmw_cotable_info {
67 u32 min_initial_entries;
68 u32 size;
69 void (*unbind_func)(struct vmw_private *, struct list_head *,
70 bool);
71};
72
73static const struct vmw_cotable_info co_info[] = {
74 {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
75 {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
76 {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
77 {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
78 {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
79 {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
80 {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
81 {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
82 {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
83 {1, sizeof(SVGACOTableDXQueryEntry), NULL},
84 {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
85};
86
87/*
88 * Cotables with bindings that we remove must be scrubbed first,
89 * otherwise, the device will swap in an invalid context when we remove
90 * bindings before scrubbing a cotable...
91 */
92const SVGACOTableType vmw_cotable_scrub_order[] = {
93 SVGA_COTABLE_RTVIEW,
94 SVGA_COTABLE_DSVIEW,
95 SVGA_COTABLE_SRVIEW,
96 SVGA_COTABLE_DXSHADER,
97 SVGA_COTABLE_ELEMENTLAYOUT,
98 SVGA_COTABLE_BLENDSTATE,
99 SVGA_COTABLE_DEPTHSTENCIL,
100 SVGA_COTABLE_RASTERIZERSTATE,
101 SVGA_COTABLE_SAMPLER,
102 SVGA_COTABLE_STREAMOUTPUT,
103 SVGA_COTABLE_DXQUERY,
104};
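
For reference, the order above is meant to be consumed by a walker like the sketch below. It assumes the per-context cotables[] array that vmw_context_cotable() exposes; the helper name is invented, and the real walker in this series lives in vmwgfx_context.c.

/* Sketch: scrub a context's cotables in the required order. Illustrative
 * only; assumes cotables[] is indexed by SVGACOTableType. */
static void example_scrub_cotables(struct vmw_resource **cotables,
				   bool readback)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(vmw_cotable_scrub_order); ++i) {
		struct vmw_resource *res =
			cotables[vmw_cotable_scrub_order[i]];

		if (res)
			(void) vmw_cotable_scrub(res, readback);
	}
}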
105
106static int vmw_cotable_bind(struct vmw_resource *res,
107 struct ttm_validate_buffer *val_buf);
108static int vmw_cotable_unbind(struct vmw_resource *res,
109 bool readback,
110 struct ttm_validate_buffer *val_buf);
111static int vmw_cotable_create(struct vmw_resource *res);
112static int vmw_cotable_destroy(struct vmw_resource *res);
113
114static const struct vmw_res_func vmw_cotable_func = {
115 .res_type = vmw_res_cotable,
116 .needs_backup = true,
117 .may_evict = true,
118 .type_name = "context guest backed object tables",
119 .backup_placement = &vmw_mob_placement,
120 .create = vmw_cotable_create,
121 .destroy = vmw_cotable_destroy,
122 .bind = vmw_cotable_bind,
123 .unbind = vmw_cotable_unbind,
124};
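
These hooks are driven by the generic resource validation code: roughly, validation calls create() and then bind(), while eviction calls unbind(). A simplified sketch of that sequencing follows; the member names come from struct vmw_res_func, but the wrapper itself is invented and omits reservation and fencing, which the real code in vmwgfx_resource.c handles.

/* Sketch of the hook sequencing during validation. Illustrative only. */
static int example_validate(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	int ret = res->func->create(res);	/* may resize the cotable */

	if (ret)
		return ret;

	return res->func->bind(res, val_buf);	/* SVGA_3D_CMD_DX_SET_COTABLE */
}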
125
126/**
127 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
128 * vmw_cotable pointer
129 *
130 * @res: Pointer to the resource.
131 */
132static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
133{
134 return container_of(res, struct vmw_cotable, res);
135}
136
137/**
138 * vmw_cotable_destroy - Cotable resource destroy callback
139 *
140 * @res: Pointer to the cotable resource.
141 *
142 * There is no device cotable destroy command, so this function only
143 * makes sure that the resource id is set to invalid.
144 */
145static int vmw_cotable_destroy(struct vmw_resource *res)
146{
147 res->id = -1;
148 return 0;
149}
150
151/**
152 * vmw_cotable_unscrub - Undo a cotable scrub operation
153 *
154 * @res: Pointer to the cotable resource
155 *
156 * This function issues commands to (re)bind the cotable to
157 * its backing mob, which needs to be validated and reserved at this point.
158 * This is identical to bind() except the function interface looks different.
159 */
160static int vmw_cotable_unscrub(struct vmw_resource *res)
161{
162 struct vmw_cotable *vcotbl = vmw_cotable(res);
163 struct vmw_private *dev_priv = res->dev_priv;
164 struct ttm_buffer_object *bo = &res->backup->base;
165 struct {
166 SVGA3dCmdHeader header;
167 SVGA3dCmdDXSetCOTable body;
168 } *cmd;
169
170 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
171 lockdep_assert_held(&bo->resv->lock.base);
172
173 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
174 if (!cmd) {
175 DRM_ERROR("Failed reserving FIFO space for cotable "
176 "binding.\n");
177 return -ENOMEM;
178 }
179
180 WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
181 WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
182 cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
183 cmd->header.size = sizeof(cmd->body);
184 cmd->body.cid = vcotbl->ctx->id;
185 cmd->body.type = vcotbl->type;
186 cmd->body.mobid = bo->mem.start;
187 cmd->body.validSizeInBytes = vcotbl->size_read_back;
188
189 vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
190 vcotbl->scrubbed = false;
191
192 return 0;
193}
194
195/**
196 * vmw_cotable_bind - Cotable resource bind callback
197 *
198 * @res: Pointer to the cotable resource
199 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
200 * for convenience / fencing.
201 *
202 * This function issues commands to (re)bind the cotable to
203 * its backing mob, which needs to be validated and reserved at this point.
204 */
205static int vmw_cotable_bind(struct vmw_resource *res,
206 struct ttm_validate_buffer *val_buf)
207{
208 /*
209 * The create() callback may have changed @res->backup without
210 * the caller noticing, and with val_buf->bo still pointing to
211 * the old backup buffer. Although hackish, and not used currently,
212 * take the opportunity to correct the value here so that it's not
213 * misused in the future.
214 */
215 val_buf->bo = &res->backup->base;
216
217 return vmw_cotable_unscrub(res);
218}
219
220/**
221 * vmw_cotable_scrub - Scrub the cotable from the device.
222 *
223 * @res: Pointer to the cotable resource.
224 * @readback: Whether to initiate a readback of the cotable data to the backup
225 * buffer.
226 *
227 * In some situations (context swapouts) it might be desirable to make the
228 * device forget about the cotable without performing a full unbind. A full
229 * unbind requires reserved backup buffers and it might not be possible to
230 * reserve them due to locking order violation issues. The vmw_cotable_scrub
231 * function implements a partial unbind() without that requirement but with the
232 * following restrictions.
233 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
234 * be called.
235 * 2) Before the cotable backing buffer is used by the CPU, or during the
236 * resource destruction, vmw_cotable_unbind() must be called.
237 */
238int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
239{
240 struct vmw_cotable *vcotbl = vmw_cotable(res);
241 struct vmw_private *dev_priv = res->dev_priv;
242 size_t submit_size;
243
244 struct {
245 SVGA3dCmdHeader header;
246 SVGA3dCmdDXReadbackCOTable body;
247 } *cmd0;
248 struct {
249 SVGA3dCmdHeader header;
250 SVGA3dCmdDXSetCOTable body;
251 } *cmd1;
252
253 if (vcotbl->scrubbed)
254 return 0;
255
256 if (co_info[vcotbl->type].unbind_func)
257 co_info[vcotbl->type].unbind_func(dev_priv,
258 &vcotbl->resource_list,
259 readback);
260 submit_size = sizeof(*cmd1);
261 if (readback)
262 submit_size += sizeof(*cmd0);
263
264 cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
265 if (!cmd1) {
266 DRM_ERROR("Failed reserving FIFO space for cotable "
267 "unbinding.\n");
268 return -ENOMEM;
269 }
270
271 vcotbl->size_read_back = 0;
272 if (readback) {
273 cmd0 = (void *) cmd1;
274 cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
275 cmd0->header.size = sizeof(cmd0->body);
276 cmd0->body.cid = vcotbl->ctx->id;
277 cmd0->body.type = vcotbl->type;
278 cmd1 = (void *) &cmd0[1];
279 vcotbl->size_read_back = res->backup_size;
280 }
281 cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
282 cmd1->header.size = sizeof(cmd1->body);
283 cmd1->body.cid = vcotbl->ctx->id;
284 cmd1->body.type = vcotbl->type;
285 cmd1->body.mobid = SVGA3D_INVALID_ID;
286 cmd1->body.validSizeInBytes = 0;
287 vmw_fifo_commit_flush(dev_priv, submit_size);
288 vcotbl->scrubbed = true;
289
290 /* Trigger a create() on next validate. */
291 res->id = -1;
292
293 return 0;
294}
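
A sketch of the calling discipline spelled out in the restrictions above. It is illustrative only: vmw_cotable_unscrub() is static to this file, so a real caller reaches it through the bind() callback during validation.

/* Sketch: a context-swapout style partial unbind followed by the
 * mandatory unscrub before the GPU touches the cotable again. */
static int example_scrub_then_unscrub(struct vmw_resource *res)
{
	/* Device forgets the cotable; entries are read back to the MOB. */
	int ret = vmw_cotable_scrub(res, true);

	if (ret)
		return ret;

	/* Restriction 1: re-issue DX_SET_COTABLE before next GPU use. */
	return vmw_cotable_unscrub(res);
}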
295
296/**
297 * vmw_cotable_unbind - Cotable resource unbind callback
298 *
299 * @res: Pointer to the cotable resource.
300 * @readback: Whether to read back cotable data to the backup buffer.
301 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
302 * for convenience / fencing.
303 *
304 * Unbinds the cotable from the device and fences the backup buffer.
305 */
306static int vmw_cotable_unbind(struct vmw_resource *res,
307 bool readback,
308 struct ttm_validate_buffer *val_buf)
309{
310 struct vmw_cotable *vcotbl = vmw_cotable(res);
311 struct vmw_private *dev_priv = res->dev_priv;
312 struct ttm_buffer_object *bo = val_buf->bo;
313 struct vmw_fence_obj *fence;
314
315
316 if (list_empty(&res->mob_head))
317 return 0;
318
319 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
320 lockdep_assert_held(&bo->resv->lock.base);
321
322 mutex_lock(&dev_priv->binding_mutex);
323 if (!vcotbl->scrubbed)
324 vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
325 mutex_unlock(&dev_priv->binding_mutex);
326 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
327 vmw_fence_single_bo(bo, fence);
328 if (likely(fence != NULL))
329 vmw_fence_obj_unreference(&fence);
330
331 return 0;
332}
333
334/**
335 * vmw_cotable_readback - Read back a cotable without unbinding.
336 *
337 * @res: The cotable resource.
338 *
339 * Reads back a cotable to its backing mob without scrubbing the MOB from
340 * the cotable. The MOB is fenced for subsequent CPU access.
341 */
342static int vmw_cotable_readback(struct vmw_resource *res)
343{
344 struct vmw_cotable *vcotbl = vmw_cotable(res);
345 struct vmw_private *dev_priv = res->dev_priv;
346
347 struct {
348 SVGA3dCmdHeader header;
349 SVGA3dCmdDXReadbackCOTable body;
350 } *cmd;
351 struct vmw_fence_obj *fence;
352
353 if (!vcotbl->scrubbed) {
354 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
355 SVGA3D_INVALID_ID);
356 if (!cmd) {
357 DRM_ERROR("Failed reserving FIFO space for cotable "
358 "readback.\n");
359 return -ENOMEM;
360 }
361 cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
362 cmd->header.size = sizeof(cmd->body);
363 cmd->body.cid = vcotbl->ctx->id;
364 cmd->body.type = vcotbl->type;
365 vcotbl->size_read_back = res->backup_size;
366 vmw_fifo_commit(dev_priv, sizeof(*cmd));
367 }
368
369 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
370 vmw_fence_single_bo(&res->backup->base, fence);
371 vmw_fence_obj_unreference(&fence);
372
373 return 0;
374}
375
376/**
377 * vmw_cotable_resize - Resize a cotable.
378 *
379 * @res: The cotable resource.
380 * @new_size: The new size.
381 *
382 * Resizes a cotable and binds the new backup buffer.
383 * On failure the cotable is left intact.
384 * Important! This function must not fail once the MOB switch has been
385 * committed to hardware. That would put the device context in an
386 * invalid state which we can't currently recover from.
387 */
388static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
389{
390 struct vmw_private *dev_priv = res->dev_priv;
391 struct vmw_cotable *vcotbl = vmw_cotable(res);
392 struct vmw_dma_buffer *buf, *old_buf = res->backup;
393 struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
394 size_t old_size = res->backup_size;
395 size_t old_size_read_back = vcotbl->size_read_back;
396 size_t cur_size_read_back;
397 struct ttm_bo_kmap_obj old_map, new_map;
398 int ret;
399 size_t i;
400
401 ret = vmw_cotable_readback(res);
402 if (ret)
403 return ret;
404
405 cur_size_read_back = vcotbl->size_read_back;
406 vcotbl->size_read_back = old_size_read_back;
407
408 /*
409 * While the device is processing, allocate and reserve a buffer object
410 * for the new COTable. Initially pin the buffer object to make sure
411 * we can use tryreserve without failure.
412 */
413 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
414 if (!buf)
415 return -ENOMEM;
416
417 ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
418 true, vmw_dmabuf_bo_free);
419 if (ret) {
420 DRM_ERROR("Failed initializing new cotable MOB.\n");
421 return ret;
422 }
423
424 bo = &buf->base;
425 WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
426
427 ret = ttm_bo_wait(old_bo, false, false, false);
428 if (unlikely(ret != 0)) {
429 DRM_ERROR("Failed waiting for cotable unbind.\n");
430 goto out_wait;
431 }
432
433 /*
434 * Do a page by page copy of COTables. This eliminates slow vmap()s.
435 * This should really be a TTM utility.
436 */
437 for (i = 0; i < old_bo->num_pages; ++i) {
438 bool dummy;
439
440 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
441 if (unlikely(ret != 0)) {
442 DRM_ERROR("Failed mapping old COTable on resize.\n");
443 goto out_wait;
444 }
445 ret = ttm_bo_kmap(bo, i, 1, &new_map);
446 if (unlikely(ret != 0)) {
447 DRM_ERROR("Failed mapping new COTable on resize.\n");
448 goto out_map_new;
449 }
450 memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
451 ttm_kmap_obj_virtual(&old_map, &dummy),
452 PAGE_SIZE);
453 ttm_bo_kunmap(&new_map);
454 ttm_bo_kunmap(&old_map);
455 }
456
457 /* Unpin new buffer, and switch backup buffers. */
458 ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
459 if (unlikely(ret != 0)) {
460 DRM_ERROR("Failed validating new COTable backup buffer.\n");
461 goto out_wait;
462 }
463
464 res->backup = buf;
465 res->backup_size = new_size;
466 vcotbl->size_read_back = cur_size_read_back;
467
468 /*
469 * Now tell the device to switch. If this fails, then we need to
470 * revert the full resize.
471 */
472 ret = vmw_cotable_unscrub(res);
473 if (ret) {
474 DRM_ERROR("Failed switching COTable backup buffer.\n");
475 res->backup = old_buf;
476 res->backup_size = old_size;
477 vcotbl->size_read_back = old_size_read_back;
478 goto out_wait;
479 }
480
481 /* Let go of the old mob. */
482 list_del(&res->mob_head);
483 list_add_tail(&res->mob_head, &buf->res_list);
484 vmw_dmabuf_unreference(&old_buf);
485 res->id = vcotbl->type;
486
487 return 0;
488
489out_map_new:
490 ttm_bo_kunmap(&old_map);
491out_wait:
492 ttm_bo_unreserve(bo);
493 vmw_dmabuf_unreference(&buf);
494
495 return ret;
496}
497
498/**
499 * vmw_cotable_create - Cotable resource create callback
500 *
501 * @res: Pointer to a cotable resource.
502 *
503 * There is no separate create command for cotables, so this callback, which
504 * is called before bind() in the validation sequence, is instead used for two
505 * things.
506 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
507 * buffer, that is, if @res->mob_head is non-empty.
508 * 2) Resize the cotable if needed.
509 */
510static int vmw_cotable_create(struct vmw_resource *res)
511{
512 struct vmw_cotable *vcotbl = vmw_cotable(res);
513 size_t new_size = res->backup_size;
514 size_t needed_size;
515 int ret;
516
517 /* Check whether we need to resize the cotable */
518 needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
519 while (needed_size > new_size)
520 new_size *= 2;
521
522 if (likely(new_size <= res->backup_size)) {
523 if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
524 ret = vmw_cotable_unscrub(res);
525 if (ret)
526 return ret;
527 }
528 res->id = vcotbl->type;
529 return 0;
530 }
531
532 return vmw_cotable_resize(res, new_size);
533}
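
Worked example of the growth policy in vmw_cotable_create(), with hypothetical numbers: 64-byte entries and a one-page (4096-byte) backup hold 64 entries; notifying entry id 200 needs (200 + 1) * 64 = 12864 bytes, so the size doubles 4096 -> 8192 -> 16384 before vmw_cotable_resize() runs. The doubling step in isolation:

/* Sketch of the size computation above; not a new helper in this patch. */
static size_t example_new_backup_size(size_t cur_size, int seen_entries,
				      u32 entry_size)
{
	size_t needed = (size_t)(seen_entries + 1) * entry_size;

	while (needed > cur_size)
		cur_size *= 2;

	return cur_size;
}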
534
535/**
536 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
537 *
538 * @res: Pointer to a cotable resource.
539 *
540 * The final (part of resource destruction) destroy callback.
541 */
542static void vmw_hw_cotable_destroy(struct vmw_resource *res)
543{
544 (void) vmw_cotable_destroy(res);
545}
546
547static size_t cotable_acc_size;
548
549/**
550 * vmw_cotable_free - Cotable resource destructor
551 *
552 * @res: Pointer to a cotable resource.
553 */
554static void vmw_cotable_free(struct vmw_resource *res)
555{
556 struct vmw_private *dev_priv = res->dev_priv;
557
558 kfree(res);
559 ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
560}
561
562/**
563 * vmw_cotable_alloc - Create a cotable resource
564 *
565 * @dev_priv: Pointer to a device private struct.
566 * @ctx: Pointer to the context resource.
567 * The cotable resource will not take a refcount on the context.
568 * @type: The cotable type.
569 */
570struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
571 struct vmw_resource *ctx,
572 u32 type)
573{
574 struct vmw_cotable *vcotbl;
575 int ret;
576 u32 num_entries;
577
578 if (unlikely(cotable_acc_size == 0))
579 cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
580
581 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
582 cotable_acc_size, false, true);
583 if (unlikely(ret))
584 return ERR_PTR(ret);
585
586 vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
587 if (unlikely(vcotbl == NULL)) {
588 ret = -ENOMEM;
589 goto out_no_alloc;
590 }
591
592 ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
593 vmw_cotable_free, &vmw_cotable_func);
594 if (unlikely(ret != 0))
595 goto out_no_init;
596
597 INIT_LIST_HEAD(&vcotbl->resource_list);
598 vcotbl->res.id = type;
599 vcotbl->res.backup_size = PAGE_SIZE;
600 num_entries = PAGE_SIZE / co_info[type].size;
601 if (num_entries < co_info[type].min_initial_entries) {
602 vcotbl->res.backup_size = co_info[type].min_initial_entries *
603 co_info[type].size;
604 vcotbl->res.backup_size =
605 (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
606 }
607
608 vcotbl->scrubbed = true;
609 vcotbl->seen_entries = -1;
610 vcotbl->type = type;
611 vcotbl->ctx = ctx;
612
613 vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
614
615 return &vcotbl->res;
616
617out_no_init:
618 kfree(vcotbl);
619out_no_alloc:
620 ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
621 return ERR_PTR(ret);
622}
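
The initial sizing rule above starts at one page and only grows when min_initial_entries does not fit; for example, hypothetical 1024-byte entries with a minimum of 8 need 8192 bytes, i.e. two pages. As a standalone sketch:

/* Sketch of the initial backup sizing in vmw_cotable_alloc(). */
static size_t example_initial_backup_size(u32 min_entries, u32 entry_size)
{
	size_t size = PAGE_SIZE;

	if (PAGE_SIZE / entry_size < min_entries)
		size = PAGE_ALIGN((size_t)min_entries * entry_size);

	return size;
}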
623
624/**
625 * vmw_cotable_notify - Notify the cotable about an item creation
626 *
627 * @res: Pointer to a cotable resource.
628 * @id: Item id.
629 */
630int vmw_cotable_notify(struct vmw_resource *res, int id)
631{
632 struct vmw_cotable *vcotbl = vmw_cotable(res);
633
634 if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
635 DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
636 (unsigned) vcotbl->type, id);
637 return -EINVAL;
638 }
639
640 if (vcotbl->seen_entries < id) {
641 /* Trigger a call to create() on next validate */
642 res->id = -1;
643 vcotbl->seen_entries = id;
644 }
645
646 return 0;
647}
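
Typical use, shown here as a hypothetical caller: the command parser notifies the matching cotable when user space defines a new DX object id, so the next validation grows the table via create(). A sketch using the vmw_context_cotable() accessor from this series:

/* Sketch: notify the shader cotable about a newly defined shader id. */
static int example_define_notify(struct vmw_resource *ctx, u32 shader_id)
{
	struct vmw_resource *cotable =
		vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
	int ret;

	if (IS_ERR(cotable))
		return PTR_ERR(cotable);

	ret = vmw_cotable_notify(cotable, shader_id);
	vmw_resource_unreference(&cotable);	/* drop the reference taken above */

	return ret;
}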
648
649/**
650 * vmw_cotable_add_resource - add a resource to the cotable's resource list.
651 *
652 * @res: pointer to struct vmw_resource representing the cotable.
653 * @head: pointer to the struct list_head member of the resource, dedicated
654 * to the cotable active resource list.
655 */
656void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
657{
658 struct vmw_cotable *vcotbl =
659 container_of(res, struct vmw_cotable, res);
660
661 list_add_tail(head, &vcotbl->resource_list);
662}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375763dc..299925a1f6c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
32 32
33 33
34/** 34/**
35 * vmw_dmabuf_to_placement - Validate a buffer to placement. 35 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
36 * 36 *
37 * @dev_priv: Driver private. 37 * @dev_priv: Driver private.
38 * @buf: DMA buffer to move. 38 * @buf: DMA buffer to move.
39 * @pin: Pin buffer if true. 39 * @placement: The placement to pin it.
40 * @interruptible: Use interruptible wait. 40 * @interruptible: Use interruptible wait.
41 * 41 *
42 * May only be called by the current master since it assumes that the
43 * master lock is the current master's lock.
44 * This function takes the master's lock in write mode.
45 * Flushes and unpins the query bo to avoid failures.
46 *
47 * Returns 42 * Returns
48 * -ERESTARTSYS if interrupted by a signal. 43 * -ERESTARTSYS if interrupted by a signal.
49 */ 44 */
50int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, 45int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
51 struct vmw_dma_buffer *buf, 46 struct vmw_dma_buffer *buf,
52 struct ttm_placement *placement, 47 struct ttm_placement *placement,
53 bool interruptible) 48 bool interruptible)
54{ 49{
55 struct ttm_buffer_object *bo = &buf->base; 50 struct ttm_buffer_object *bo = &buf->base;
56 int ret; 51 int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
66 goto err; 61 goto err;
67 62
68 ret = ttm_bo_validate(bo, placement, interruptible, false); 63 ret = ttm_bo_validate(bo, placement, interruptible, false);
64 if (!ret)
65 vmw_bo_pin_reserved(buf, true);
69 66
70 ttm_bo_unreserve(bo); 67 ttm_bo_unreserve(bo);
71 68
@@ -75,12 +72,10 @@ err:
75} 72}
76 73
77/** 74/**
78 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr. 75 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
79 * 76 *
80 * May only be called by the current master since it assumes that the 77 * This function takes the reservation_sem in write mode.
81 * master lock is the current master's lock. 78 * Flushes and unpins the query bo to avoid failures.
82 * This function takes the master's lock in write mode.
83 * Flushes and unpins the query bo if @pin == true to avoid failures.
84 * 79 *
85 * @dev_priv: Driver private. 80 * @dev_priv: Driver private.
86 * @buf: DMA buffer to move. 81 * @buf: DMA buffer to move.
@@ -90,55 +85,34 @@ err:
90 * Returns 85 * Returns
91 * -ERESTARTSYS if interrupted by a signal. 86 * -ERESTARTSYS if interrupted by a signal.
92 */ 87 */
93int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, 88int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
94 struct vmw_dma_buffer *buf, 89 struct vmw_dma_buffer *buf,
95 bool pin, bool interruptible) 90 bool interruptible)
96{ 91{
97 struct ttm_buffer_object *bo = &buf->base; 92 struct ttm_buffer_object *bo = &buf->base;
98 struct ttm_placement *placement;
99 int ret; 93 int ret;
100 94
101 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); 95 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
102 if (unlikely(ret != 0)) 96 if (unlikely(ret != 0))
103 return ret; 97 return ret;
104 98
105 if (pin) 99 vmw_execbuf_release_pinned_bo(dev_priv);
106 vmw_execbuf_release_pinned_bo(dev_priv);
107 100
108 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 101 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
109 if (unlikely(ret != 0)) 102 if (unlikely(ret != 0))
110 goto err; 103 goto err;
111 104
112 /** 105 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
113 * Put BO in VRAM if there is space, otherwise as a GMR. 106 false);
114 * If there is no space in VRAM and GMR ids are all used up,
115 * start evicting GMRs to make room. If the DMA buffer can't be
116 * used as a GMR, this will return -ENOMEM.
117 */
118
119 if (pin)
120 placement = &vmw_vram_gmr_ne_placement;
121 else
122 placement = &vmw_vram_gmr_placement;
123
124 ret = ttm_bo_validate(bo, placement, interruptible, false);
125 if (likely(ret == 0) || ret == -ERESTARTSYS) 107 if (likely(ret == 0) || ret == -ERESTARTSYS)
126 goto err_unreserve; 108 goto out_unreserve;
127
128 109
129 /** 110 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
130 * If that failed, try VRAM again, this time evicting
131 * previous contents.
132 */
133
134 if (pin)
135 placement = &vmw_vram_ne_placement;
136 else
137 placement = &vmw_vram_placement;
138 111
139 ret = ttm_bo_validate(bo, placement, interruptible, false); 112out_unreserve:
113 if (!ret)
114 vmw_bo_pin_reserved(buf, true);
140 115
141err_unreserve:
142 ttm_bo_unreserve(bo); 116 ttm_bo_unreserve(bo);
143err: 117err:
144 ttm_write_unlock(&dev_priv->reservation_sem); 118 ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ err:
146} 120}
147 121
148/** 122/**
149 * vmw_dmabuf_to_vram - Move a buffer to vram. 123 * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
150 * 124 *
151 * May only be called by the current master since it assumes that the 125 * This function takes the reservation_sem in write mode.
152 * master lock is the current master's lock. 126 * Flushes and unpins the query bo to avoid failures.
153 * This function takes the master's lock in write mode.
154 * 127 *
155 * @dev_priv: Driver private. 128 * @dev_priv: Driver private.
156 * @buf: DMA buffer to move. 129 * @buf: DMA buffer to move.
157 * @pin: Pin buffer in vram if true.
158 * @interruptible: Use interruptible wait. 130 * @interruptible: Use interruptible wait.
159 * 131 *
160 * Returns 132 * Returns
161 * -ERESTARTSYS if interrupted by a signal. 133 * -ERESTARTSYS if interrupted by a signal.
162 */ 134 */
163int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, 135int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
164 struct vmw_dma_buffer *buf, 136 struct vmw_dma_buffer *buf,
165 bool pin, bool interruptible) 137 bool interruptible)
166{ 138{
167 struct ttm_placement *placement; 139 return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
168 140 interruptible);
169 if (pin)
170 placement = &vmw_vram_ne_placement;
171 else
172 placement = &vmw_vram_placement;
173
174 return vmw_dmabuf_to_placement(dev_priv, buf,
175 placement,
176 interruptible);
177} 141}
178 142
179/** 143/**
180 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram. 144 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
181 * 145 *
182 * May only be called by the current master since it assumes that the 146 * This function takes the reservation_sem in write mode.
183 * master lock is the current master's lock. 147 * Flushes and unpins the query bo to avoid failures.
184 * This function takes the master's lock in write mode.
185 * Flushes and unpins the query bo if @pin == true to avoid failures.
186 * 148 *
187 * @dev_priv: Driver private. 149 * @dev_priv: Driver private.
188 * @buf: DMA buffer to move. 150 * @buf: DMA buffer to pin.
189 * @pin: Pin buffer in vram if true.
190 * @interruptible: Use interruptible wait. 151 * @interruptible: Use interruptible wait.
191 * 152 *
192 * Returns 153 * Returns
193 * -ERESTARTSYS if interrupted by a signal. 154 * -ERESTARTSYS if interrupted by a signal.
194 */ 155 */
195int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, 156int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
196 struct vmw_dma_buffer *buf, 157 struct vmw_dma_buffer *buf,
197 bool pin, bool interruptible) 158 bool interruptible)
198{ 159{
199 struct ttm_buffer_object *bo = &buf->base; 160 struct ttm_buffer_object *bo = &buf->base;
200 struct ttm_placement placement; 161 struct ttm_placement placement;
201 struct ttm_place place; 162 struct ttm_place place;
202 int ret = 0; 163 int ret = 0;
203 164
204 if (pin) 165 place = vmw_vram_placement.placement[0];
205 place = vmw_vram_ne_placement.placement[0];
206 else
207 place = vmw_vram_placement.placement[0];
208 place.lpfn = bo->num_pages; 166 place.lpfn = bo->num_pages;
209
210 placement.num_placement = 1; 167 placement.num_placement = 1;
211 placement.placement = &place; 168 placement.placement = &place;
212 placement.num_busy_placement = 1; 169 placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
216 if (unlikely(ret != 0)) 173 if (unlikely(ret != 0))
217 return ret; 174 return ret;
218 175
219 if (pin) 176 vmw_execbuf_release_pinned_bo(dev_priv);
220 vmw_execbuf_release_pinned_bo(dev_priv);
221 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 177 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
222 if (unlikely(ret != 0)) 178 if (unlikely(ret != 0))
223 goto err_unlock; 179 goto err_unlock;
224 180
225 /* Is this buffer already in vram but not at the start of it? */ 181 /*
182 * Is this buffer already in vram but not at the start of it?
183 * In that case, evict it first because TTM isn't good at handling
184 * that situation.
185 */
226 if (bo->mem.mem_type == TTM_PL_VRAM && 186 if (bo->mem.mem_type == TTM_PL_VRAM &&
227 bo->mem.start < bo->num_pages && 187 bo->mem.start < bo->num_pages &&
228 bo->mem.start > 0) 188 bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
230 190
231 ret = ttm_bo_validate(bo, &placement, interruptible, false); 191 ret = ttm_bo_validate(bo, &placement, interruptible, false);
232 192
233 /* For some reason we didn't up at the start of vram */ 193 /* For some reason we didn't end up at the start of vram */
234 WARN_ON(ret == 0 && bo->offset != 0); 194 WARN_ON(ret == 0 && bo->offset != 0);
195 if (!ret)
196 vmw_bo_pin_reserved(buf, true);
235 197
236 ttm_bo_unreserve(bo); 198 ttm_bo_unreserve(bo);
237err_unlock: 199err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
240 return ret; 202 return ret;
241} 203}
242 204
243
244/** 205/**
245 * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer. 206 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
246 * 207 *
247 * May only be called by the current master since it assumes that the 208 * This function takes the reservation_sem in write mode.
248 * master lock is the current master's lock.
249 * This function takes the master's lock in write mode.
250 * 209 *
251 * @dev_priv: Driver private. 210 * @dev_priv: Driver private.
252 * @buf: DMA buffer to unpin. 211 * @buf: DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
259 struct vmw_dma_buffer *buf, 218 struct vmw_dma_buffer *buf,
260 bool interruptible) 219 bool interruptible)
261{ 220{
262 /* 221 struct ttm_buffer_object *bo = &buf->base;
263 * We could in theory early out if the buffer is 222 int ret;
264 * unpinned but we need to lock and reserve the buffer 223
265 * anyways so we don't gain much by that. 224 ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
266 */ 225 if (unlikely(ret != 0))
267 return vmw_dmabuf_to_placement(dev_priv, buf, 226 return ret;
268 &vmw_evictable_placement,
269 interruptible);
270}
271 227
228 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
229 if (unlikely(ret != 0))
230 goto err;
231
232 vmw_bo_pin_reserved(buf, false);
233
234 ttm_bo_unreserve(bo);
235
236err:
237 ttm_read_unlock(&dev_priv->reservation_sem);
238 return ret;
239}
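
A hypothetical caller of the reworked pin API: the pin_in_* helpers now pin unconditionally, and vmw_dmabuf_unpin() drops that pin. A sketch:

/* Sketch: pin a buffer into VRAM, use it, then unpin. -ERESTARTSYS must
 * be propagated so the ioctl can be restarted. Illustrative only. */
static int example_pin_use_unpin(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer *buf)
{
	int ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true);

	if (ret)
		return ret;

	/* ... buffer is guaranteed resident in VRAM here ... */

	return vmw_dmabuf_unpin(dev_priv, buf, true);
}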
272 240
273/** 241/**
274 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement 242 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
291 259
292 260
293/** 261/**
294 * vmw_bo_pin - Pin or unpin a buffer object without moving it. 262 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
295 * 263 *
296 * @bo: The buffer object. Must be reserved. 264 * @vbo: The buffer object. Must be reserved.
297 * @pin: Whether to pin or unpin. 265 * @pin: Whether to pin or unpin.
298 * 266 *
299 */ 267 */
300void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) 268void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
301{ 269{
302 struct ttm_place pl; 270 struct ttm_place pl;
303 struct ttm_placement placement; 271 struct ttm_placement placement;
272 struct ttm_buffer_object *bo = &vbo->base;
304 uint32_t old_mem_type = bo->mem.mem_type; 273 uint32_t old_mem_type = bo->mem.mem_type;
305 int ret; 274 int ret;
306 275
307 lockdep_assert_held(&bo->resv->lock.base); 276 lockdep_assert_held(&bo->resv->lock.base);
308 277
278 if (pin) {
279 if (vbo->pin_count++ > 0)
280 return;
281 } else {
282 WARN_ON(vbo->pin_count <= 0);
283 if (--vbo->pin_count > 0)
284 return;
285 }
286
309 pl.fpfn = 0; 287 pl.fpfn = 0;
310 pl.lpfn = 0; 288 pl.lpfn = 0;
311 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB 289 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
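
The new pin_count field makes pins nest: only the 0 -> 1 and 1 -> 0 transitions recompute the placement. A sketch of the semantics (the buffer must already be reserved, per the lockdep assertion above):

/* Sketch: nested pin/unpin pairs on a reserved buffer. Illustrative only. */
static void example_nested_pin(struct vmw_dma_buffer *vbo)
{
	vmw_bo_pin_reserved(vbo, true);		/* pin_count 0 -> 1: pins */
	vmw_bo_pin_reserved(vbo, true);		/* pin_count 1 -> 2: no-op */
	vmw_bo_pin_reserved(vbo, false);	/* pin_count 2 -> 1: no-op */
	vmw_bo_pin_reserved(vbo, false);	/* pin_count 1 -> 0: unpins */
}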
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6218a36cf01a..f97ec5686cbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
28 28
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "vmwgfx_drv.h" 30#include "vmwgfx_drv.h"
31#include "vmwgfx_binding.h"
31#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
32#include <drm/ttm/ttm_bo_driver.h> 33#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_object.h> 34#include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
127#define DRM_IOCTL_VMW_SYNCCPU \ 128#define DRM_IOCTL_VMW_SYNCCPU \
128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 129 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
129 struct drm_vmw_synccpu_arg) 130 struct drm_vmw_synccpu_arg)
131#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
132 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
133 struct drm_vmw_context_arg)
130 134
131/** 135/**
132 * The core DRM version of this macro doesn't account for 136 * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
168 DRM_UNLOCKED | DRM_RENDER_ALLOW), 172 DRM_UNLOCKED | DRM_RENDER_ALLOW),
169 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 173 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
170 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 174 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
171 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, 175 VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
172 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 176 DRM_RENDER_ALLOW),
173 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 177 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
174 DRM_UNLOCKED | DRM_RENDER_ALLOW), 178 DRM_UNLOCKED | DRM_RENDER_ALLOW),
175 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 179 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
206 VMW_IOCTL_DEF(VMW_SYNCCPU, 210 VMW_IOCTL_DEF(VMW_SYNCCPU,
207 vmw_user_dmabuf_synccpu_ioctl, 211 vmw_user_dmabuf_synccpu_ioctl,
208 DRM_UNLOCKED | DRM_RENDER_ALLOW), 212 DRM_UNLOCKED | DRM_RENDER_ALLOW),
213 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
214 vmw_extended_context_define_ioctl,
215 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
209}; 216};
210 217
211static struct pci_device_id vmw_pci_id_list[] = { 218static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
278 DRM_INFO(" Command Buffers 2.\n"); 285 DRM_INFO(" Command Buffers 2.\n");
279 if (capabilities & SVGA_CAP_GBOBJECTS) 286 if (capabilities & SVGA_CAP_GBOBJECTS)
280 DRM_INFO(" Guest Backed Resources.\n"); 287 DRM_INFO(" Guest Backed Resources.\n");
288 if (capabilities & SVGA_CAP_DX)
289 DRM_INFO(" DX Features.\n");
281} 290}
282 291
283/** 292/**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
296static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 305static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
297{ 306{
298 int ret; 307 int ret;
299 struct ttm_buffer_object *bo; 308 struct vmw_dma_buffer *vbo;
300 struct ttm_bo_kmap_obj map; 309 struct ttm_bo_kmap_obj map;
301 volatile SVGA3dQueryResult *result; 310 volatile SVGA3dQueryResult *result;
302 bool dummy; 311 bool dummy;
303 312
304 /* 313 /*
305 * Create the bo as pinned, so that a tryreserve will 314 * Create the vbo as pinned, so that a tryreserve will
306 * immediately succeed. This is because we're the only 315 * immediately succeed. This is because we're the only
307 * user of the bo currently. 316 * user of the bo currently.
308 */ 317 */
309 ret = ttm_bo_create(&dev_priv->bdev, 318 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
310 PAGE_SIZE, 319 if (!vbo)
311 ttm_bo_type_device, 320 return -ENOMEM;
312 &vmw_sys_ne_placement,
313 0, false, NULL,
314 &bo);
315 321
322 ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
323 &vmw_sys_ne_placement, false,
324 &vmw_dmabuf_bo_free);
316 if (unlikely(ret != 0)) 325 if (unlikely(ret != 0))
317 return ret; 326 return ret;
318 327
319 ret = ttm_bo_reserve(bo, false, true, false, NULL); 328 ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
320 BUG_ON(ret != 0); 329 BUG_ON(ret != 0);
330 vmw_bo_pin_reserved(vbo, true);
321 331
322 ret = ttm_bo_kmap(bo, 0, 1, &map); 332 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
323 if (likely(ret == 0)) { 333 if (likely(ret == 0)) {
324 result = ttm_kmap_obj_virtual(&map, &dummy); 334 result = ttm_kmap_obj_virtual(&map, &dummy);
325 result->totalSize = sizeof(*result); 335 result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
327 result->result32 = 0xff; 337 result->result32 = 0xff;
328 ttm_bo_kunmap(&map); 338 ttm_bo_kunmap(&map);
329 } 339 }
330 vmw_bo_pin(bo, false); 340 vmw_bo_pin_reserved(vbo, false);
331 ttm_bo_unreserve(bo); 341 ttm_bo_unreserve(&vbo->base);
332 342
333 if (unlikely(ret != 0)) { 343 if (unlikely(ret != 0)) {
334 DRM_ERROR("Dummy query buffer map failed.\n"); 344 DRM_ERROR("Dummy query buffer map failed.\n");
335 ttm_bo_unref(&bo); 345 vmw_dmabuf_unreference(&vbo);
336 } else 346 } else
337 dev_priv->dummy_query_bo = bo; 347 dev_priv->dummy_query_bo = vbo;
338 348
339 return ret; 349 return ret;
340} 350}
341 351
352/**
353 * vmw_request_device_late - Perform late device setup
354 *
355 * @dev_priv: Pointer to device private.
356 *
357 * This function performs setup of otables and enables large command
358 * buffer submission. These tasks are split out to a separate function
359 * because it reverts vmw_release_device_early and is intended to be used
360 * by an error path in the hibernation code.
361 */
362static int vmw_request_device_late(struct vmw_private *dev_priv)
363{
364 int ret;
365
366 if (dev_priv->has_mob) {
367 ret = vmw_otables_setup(dev_priv);
368 if (unlikely(ret != 0)) {
369 DRM_ERROR("Unable to initialize "
370 "guest Memory OBjects.\n");
371 return ret;
372 }
373 }
374
375 if (dev_priv->cman) {
376 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
377 256*4096, 2*4096);
378 if (ret) {
379 struct vmw_cmdbuf_man *man = dev_priv->cman;
380
381 dev_priv->cman = NULL;
382 vmw_cmdbuf_man_destroy(man);
383 }
384 }
385
386 return 0;
387}
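
The early/late split pairs with vmw_release_device_early(): a hibernation path can tear down otables and the large command-buffer pool early, then call vmw_request_device_late() to rebuild them if suspend fails. A sketch under those assumptions (both functions are static to this file, and the suspend helper is hypothetical):

static bool example_try_suspend(struct vmw_private *dev_priv); /* hypothetical */

/* Sketch of the intended error-path pairing. Illustrative only. */
static int example_hibernate(struct vmw_private *dev_priv)
{
	int ret = 0;

	vmw_release_device_early(dev_priv);

	if (!example_try_suspend(dev_priv)) {
		/* Suspend failed: restore what the early teardown removed. */
		ret = vmw_request_device_late(dev_priv);
	}

	return ret;
}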
388
342static int vmw_request_device(struct vmw_private *dev_priv) 389static int vmw_request_device(struct vmw_private *dev_priv)
343{ 390{
344 int ret; 391 int ret;
@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
349 return ret; 396 return ret;
350 } 397 }
351 vmw_fence_fifo_up(dev_priv->fman); 398 vmw_fence_fifo_up(dev_priv->fman);
352 if (dev_priv->has_mob) { 399 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
353 ret = vmw_otables_setup(dev_priv); 400 if (IS_ERR(dev_priv->cman)) {
354 if (unlikely(ret != 0)) { 401 dev_priv->cman = NULL;
355 DRM_ERROR("Unable to initialize " 402 dev_priv->has_dx = false;
356 "guest Memory OBjects.\n");
357 goto out_no_mob;
358 }
359 } 403 }
404
405 ret = vmw_request_device_late(dev_priv);
406 if (ret)
407 goto out_no_mob;
408
360 ret = vmw_dummy_query_bo_create(dev_priv); 409 ret = vmw_dummy_query_bo_create(dev_priv);
361 if (unlikely(ret != 0)) 410 if (unlikely(ret != 0))
362 goto out_no_query_bo; 411 goto out_no_query_bo;
@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
364 return 0; 413 return 0;
365 414
366out_no_query_bo: 415out_no_query_bo:
367 if (dev_priv->has_mob) 416 if (dev_priv->cman)
417 vmw_cmdbuf_remove_pool(dev_priv->cman);
418 if (dev_priv->has_mob) {
419 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
368 vmw_otables_takedown(dev_priv); 420 vmw_otables_takedown(dev_priv);
421 }
422 if (dev_priv->cman)
423 vmw_cmdbuf_man_destroy(dev_priv->cman);
369out_no_mob: 424out_no_mob:
370 vmw_fence_fifo_down(dev_priv->fman); 425 vmw_fence_fifo_down(dev_priv->fman);
371 vmw_fifo_release(dev_priv, &dev_priv->fifo); 426 vmw_fifo_release(dev_priv, &dev_priv->fifo);
372 return ret; 427 return ret;
373} 428}
374 429
375static void vmw_release_device(struct vmw_private *dev_priv) 430/**
431 * vmw_release_device_early - Early part of fifo takedown.
432 *
433 * @dev_priv: Pointer to device private struct.
434 *
435 * This is the first part of command submission takedown, to be called before
436 * buffer management is taken down.
437 */
438static void vmw_release_device_early(struct vmw_private *dev_priv)
376{ 439{
377 /* 440 /*
378 * Previous destructions should've released 441 * Previous destructions should've released
@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
381 444
382 BUG_ON(dev_priv->pinned_bo != NULL); 445 BUG_ON(dev_priv->pinned_bo != NULL);
383 446
384 ttm_bo_unref(&dev_priv->dummy_query_bo); 447 vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
385 if (dev_priv->has_mob) 448 if (dev_priv->cman)
386 vmw_otables_takedown(dev_priv); 449 vmw_cmdbuf_remove_pool(dev_priv->cman);
387 vmw_fence_fifo_down(dev_priv->fman);
388 vmw_fifo_release(dev_priv, &dev_priv->fifo);
389}
390
391
392/**
393 * Increase the 3d resource refcount.
394 * If the count was previously zero, initialize the fifo, switching to svga
395 * mode. Note that the master holds a ref as well, and may request an
396 * explicit switch to svga mode if fb is not running, using @unhide_svga.
397 */
398int vmw_3d_resource_inc(struct vmw_private *dev_priv,
399 bool unhide_svga)
400{
401 int ret = 0;
402 450
403 mutex_lock(&dev_priv->release_mutex); 451 if (dev_priv->has_mob) {
404 if (unlikely(dev_priv->num_3d_resources++ == 0)) { 452 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
405 ret = vmw_request_device(dev_priv); 453 vmw_otables_takedown(dev_priv);
406 if (unlikely(ret != 0))
407 --dev_priv->num_3d_resources;
408 } else if (unhide_svga) {
409 vmw_write(dev_priv, SVGA_REG_ENABLE,
410 vmw_read(dev_priv, SVGA_REG_ENABLE) &
411 ~SVGA_REG_ENABLE_HIDE);
412 } 454 }
413
414 mutex_unlock(&dev_priv->release_mutex);
415 return ret;
416} 455}
417 456
418/** 457/**
419 * Decrease the 3d resource refcount. 458 * vmw_release_device_late - Late part of fifo takedown.
420 * If the count reaches zero, disable the fifo, switching to vga mode. 459 *
421 * Note that the master holds a refcount as well, and may request an 460 * @dev_priv: Pointer to device private struct.
422 * explicit switch to vga mode when it releases its refcount to account 461 *
423 * for the situation of an X server vt switch to VGA with 3d resources 462 * This is the last part of the command submission takedown, to be called when
424 * active. 463 * command submission is no longer needed. It may wait on pending fences.
425 */ 464 */
426void vmw_3d_resource_dec(struct vmw_private *dev_priv, 465static void vmw_release_device_late(struct vmw_private *dev_priv)
427 bool hide_svga)
428{ 466{
429 int32_t n3d; 467 vmw_fence_fifo_down(dev_priv->fman);
430 468 if (dev_priv->cman)
431 mutex_lock(&dev_priv->release_mutex); 469 vmw_cmdbuf_man_destroy(dev_priv->cman);
432 if (unlikely(--dev_priv->num_3d_resources == 0))
433 vmw_release_device(dev_priv);
434 else if (hide_svga)
435 vmw_write(dev_priv, SVGA_REG_ENABLE,
436 vmw_read(dev_priv, SVGA_REG_ENABLE) |
437 SVGA_REG_ENABLE_HIDE);
438
439 n3d = (int32_t) dev_priv->num_3d_resources;
440 mutex_unlock(&dev_priv->release_mutex);
441 470
442 BUG_ON(n3d < 0); 471 vmw_fifo_release(dev_priv, &dev_priv->fifo);
443} 472}
444 473
445/** 474/**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
603 spin_lock_init(&dev_priv->hw_lock); 632 spin_lock_init(&dev_priv->hw_lock);
604 spin_lock_init(&dev_priv->waiter_lock); 633 spin_lock_init(&dev_priv->waiter_lock);
605 spin_lock_init(&dev_priv->cap_lock); 634 spin_lock_init(&dev_priv->cap_lock);
635 spin_lock_init(&dev_priv->svga_lock);
606 636
607 for (i = vmw_res_context; i < vmw_res_max; ++i) { 637 for (i = vmw_res_context; i < vmw_res_max; ++i) {
608 idr_init(&dev_priv->res_idr[i]); 638 idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
673 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 703 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
674 dev_priv->max_mob_size = 704 dev_priv->max_mob_size =
675 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 705 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
676 } else 706 dev_priv->stdu_max_width =
707 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
708 dev_priv->stdu_max_height =
709 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
710
711 vmw_write(dev_priv, SVGA_REG_DEV_CAP,
712 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
713 dev_priv->texture_max_width = vmw_read(dev_priv,
714 SVGA_REG_DEV_CAP);
715 vmw_write(dev_priv, SVGA_REG_DEV_CAP,
716 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
717 dev_priv->texture_max_height = vmw_read(dev_priv,
718 SVGA_REG_DEV_CAP);
719 } else {
720 dev_priv->texture_max_width = 8192;
721 dev_priv->texture_max_height = 8192;
677 dev_priv->prim_bb_mem = dev_priv->vram_size; 722 dev_priv->prim_bb_mem = dev_priv->vram_size;
723 }
724
725 vmw_print_capabilities(dev_priv->capabilities);
678 726
679 ret = vmw_dma_masks(dev_priv); 727 ret = vmw_dma_masks(dev_priv);
680 if (unlikely(ret != 0)) 728 if (unlikely(ret != 0))
681 goto out_err0; 729 goto out_err0;
682 730
683 /*
684 * Limit back buffer size to VRAM size. Remove this once
685 * screen targets are implemented.
686 */
687 if (dev_priv->prim_bb_mem > dev_priv->vram_size)
688 dev_priv->prim_bb_mem = dev_priv->vram_size;
689
690 vmw_print_capabilities(dev_priv->capabilities);
691
692 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 731 if (dev_priv->capabilities & SVGA_CAP_GMR2) {
693 DRM_INFO("Max GMR ids is %u\n", 732 DRM_INFO("Max GMR ids is %u\n",
694 (unsigned)dev_priv->max_gmr_ids); 733 (unsigned)dev_priv->max_gmr_ids);
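The texture-limit reads above drive SVGA_REG_DEV_CAP as an index/data register pair; the same handshake probes SVGA3D_DEVCAP_DX further down in this file. A hypothetical helper capturing the protocol (the driver open-codes it; concurrent probes are serialized with cap_lock):

static u32 vmw_devcap_get(struct vmw_private *dev_priv, u32 cap)
{
	/* Write the capability index, then read its value back. */
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);
	return vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}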
@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
714 dev_priv->active_master = &dev_priv->fbdev_master; 753 dev_priv->active_master = &dev_priv->fbdev_master;
715 754
716 755
717 ret = ttm_bo_device_init(&dev_priv->bdev,
718 dev_priv->bo_global_ref.ref.object,
719 &vmw_bo_driver,
720 dev->anon_inode->i_mapping,
721 VMWGFX_FILE_PAGE_OFFSET,
722 false);
723 if (unlikely(ret != 0)) {
724 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
725 goto out_err1;
726 }
727
728 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 756 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
729 dev_priv->mmio_size); 757 dev_priv->mmio_size);
730 758
@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
787 goto out_no_fman; 815 goto out_no_fman;
788 } 816 }
789 817
818 ret = ttm_bo_device_init(&dev_priv->bdev,
819 dev_priv->bo_global_ref.ref.object,
820 &vmw_bo_driver,
821 dev->anon_inode->i_mapping,
822 VMWGFX_FILE_PAGE_OFFSET,
823 false);
824 if (unlikely(ret != 0)) {
825 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
826 goto out_no_bdev;
827 }
790 828
829 /*
830 * Enable VRAM, but initially don't use it until SVGA is enabled and
831 * unhidden.
832 */
791 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 833 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
792 (dev_priv->vram_size >> PAGE_SHIFT)); 834 (dev_priv->vram_size >> PAGE_SHIFT));
793 if (unlikely(ret != 0)) { 835 if (unlikely(ret != 0)) {
794 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 836 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
795 goto out_no_vram; 837 goto out_no_vram;
796 } 838 }
839 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
797 840
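Note the gate being set up here: the VRAM manager is initialized but immediately marked unusable, so TTM will not satisfy placements from VRAM until SVGA is enabled and unhidden. A sketch of what the flag controls, assuming TTM's standard placement behavior:

/* Hypothetical helper: true only between __vmw_svga_enable() and
 * __vmw_svga_disable() (both added later in this file), which toggle
 * the flag under svga_lock. */
static bool vmw_vram_placement_usable(struct vmw_private *dev_priv)
{
	return dev_priv->bdev.man[TTM_PL_VRAM].use_type;
}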
798 dev_priv->has_gmr = true; 841 dev_priv->has_gmr = true;
799 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 842 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
814 } 857 }
815 } 858 }
816 859
817 vmw_kms_save_vga(dev_priv);
860 if (dev_priv->has_mob) {
861 spin_lock(&dev_priv->cap_lock);
862 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
863 dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
864 spin_unlock(&dev_priv->cap_lock);
865 }
866
818 867
819 /* Start kms and overlay systems, needs fifo. */
820 ret = vmw_kms_init(dev_priv); 868 ret = vmw_kms_init(dev_priv);
821 if (unlikely(ret != 0)) 869 if (unlikely(ret != 0))
822 goto out_no_kms; 870 goto out_no_kms;
823 vmw_overlay_init(dev_priv); 871 vmw_overlay_init(dev_priv);
824 872
873 ret = vmw_request_device(dev_priv);
874 if (ret)
875 goto out_no_fifo;
876
877 DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
878
825 if (dev_priv->enable_fb) { 879 if (dev_priv->enable_fb) {
826 ret = vmw_3d_resource_inc(dev_priv, true);
827 if (unlikely(ret != 0))
828 goto out_no_fifo;
880 vmw_fifo_resource_inc(dev_priv);
881 vmw_svga_enable(dev_priv);
829 vmw_fb_init(dev_priv); 882 vmw_fb_init(dev_priv);
830 } 883 }
831 884
@@ -838,13 +891,14 @@ out_no_fifo:
838 vmw_overlay_close(dev_priv); 891 vmw_overlay_close(dev_priv);
839 vmw_kms_close(dev_priv); 892 vmw_kms_close(dev_priv);
840out_no_kms: 893out_no_kms:
841 vmw_kms_restore_vga(dev_priv);
842 if (dev_priv->has_mob) 894 if (dev_priv->has_mob)
843 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 895 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
844 if (dev_priv->has_gmr) 896 if (dev_priv->has_gmr)
845 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 897 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
846 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 898 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
847out_no_vram: 899out_no_vram:
900 (void)ttm_bo_device_release(&dev_priv->bdev);
901out_no_bdev:
848 vmw_fence_manager_takedown(dev_priv->fman); 902 vmw_fence_manager_takedown(dev_priv->fman);
849out_no_fman: 903out_no_fman:
850 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 904 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@ out_err4:
860 iounmap(dev_priv->mmio_virt); 914 iounmap(dev_priv->mmio_virt);
861out_err3: 915out_err3:
862 arch_phys_wc_del(dev_priv->mmio_mtrr); 916 arch_phys_wc_del(dev_priv->mmio_mtrr);
863 (void)ttm_bo_device_release(&dev_priv->bdev);
864out_err1:
865 vmw_ttm_global_release(dev_priv); 917 vmw_ttm_global_release(dev_priv);
866out_err0: 918out_err0:
867 for (i = vmw_res_context; i < vmw_res_max; ++i) 919 for (i = vmw_res_context; i < vmw_res_max; ++i)
868 idr_destroy(&dev_priv->res_idr[i]); 920 idr_destroy(&dev_priv->res_idr[i]);
869 921
922 if (dev_priv->ctx.staged_bindings)
923 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
870 kfree(dev_priv); 924 kfree(dev_priv);
871 return ret; 925 return ret;
872} 926}
@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
882 drm_ht_remove(&dev_priv->ctx.res_ht); 936 drm_ht_remove(&dev_priv->ctx.res_ht);
883 vfree(dev_priv->ctx.cmd_bounce); 937 vfree(dev_priv->ctx.cmd_bounce);
884 if (dev_priv->enable_fb) { 938 if (dev_priv->enable_fb) {
939 vmw_fb_off(dev_priv);
885 vmw_fb_close(dev_priv); 940 vmw_fb_close(dev_priv);
886 vmw_kms_restore_vga(dev_priv);
887 vmw_3d_resource_dec(dev_priv, false);
941 vmw_fifo_resource_dec(dev_priv);
942 vmw_svga_disable(dev_priv);
888 } 943 }
944
889 vmw_kms_close(dev_priv); 945 vmw_kms_close(dev_priv);
890 vmw_overlay_close(dev_priv); 946 vmw_overlay_close(dev_priv);
891 947
892 if (dev_priv->has_mob)
893 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
894 if (dev_priv->has_gmr) 948 if (dev_priv->has_gmr)
895 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 949 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 950 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
897 951
952 vmw_release_device_early(dev_priv);
953 if (dev_priv->has_mob)
954 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
955 (void) ttm_bo_device_release(&dev_priv->bdev);
956 vmw_release_device_late(dev_priv);
898 vmw_fence_manager_takedown(dev_priv->fman); 957 vmw_fence_manager_takedown(dev_priv->fman);
899 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 958 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
900 drm_irq_uninstall(dev_priv->dev); 959 drm_irq_uninstall(dev_priv->dev);
@@ -907,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev)
907 iounmap(dev_priv->mmio_virt); 966 iounmap(dev_priv->mmio_virt);
908 arch_phys_wc_del(dev_priv->mmio_mtrr); 967 arch_phys_wc_del(dev_priv->mmio_mtrr);
909 (void)ttm_bo_device_release(&dev_priv->bdev); 968 (void)ttm_bo_device_release(&dev_priv->bdev);
969 if (dev_priv->ctx.staged_bindings)
970 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
910 vmw_ttm_global_release(dev_priv); 971 vmw_ttm_global_release(dev_priv);
911 972
912 for (i = vmw_res_context; i < vmw_res_max; ++i) 973 for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -1044,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1044 const struct drm_ioctl_desc *ioctl = 1105 const struct drm_ioctl_desc *ioctl =
1045 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1106 &vmw_ioctls[nr - DRM_COMMAND_BASE];
1046 1107
1047 if (unlikely(ioctl->cmd != cmd)) {
1048 DRM_ERROR("Invalid command format, ioctl %d\n",
1049 nr - DRM_COMMAND_BASE);
1050 return -EINVAL;
1108 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1109 ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
1110 if (unlikely(ret != 0))
1111 return ret;
1112
1113 if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
1114 goto out_io_encoding;
1115
1116 return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
1117 _IOC_SIZE(cmd));
1051 } 1118 }
1119
1120 if (unlikely(ioctl->cmd != cmd))
1121 goto out_io_encoding;
1122
1052 flags = ioctl->flags; 1123 flags = ioctl->flags;
1053 } else if (!drm_ioctl_flags(nr, &flags)) 1124 } else if (!drm_ioctl_flags(nr, &flags))
1054 return -EINVAL; 1125 return -EINVAL;
@@ -1068,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1068 ttm_read_unlock(&vmaster->lock); 1139 ttm_read_unlock(&vmaster->lock);
1069 1140
1070 return ret; 1141 return ret;
1142
1143out_io_encoding:
1144 DRM_ERROR("Invalid command format, ioctl %d\n",
1145 nr - DRM_COMMAND_BASE);
1146
1147 return -EINVAL;
1071} 1148}
1072 1149
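The DRM_VMW_EXECBUF special case above skips the exact ioctl-encoding match, presumably so the execbuf argument struct can grow between driver minor versions: permissions are still enforced through drm_ioctl_permit(), only the pure-input direction is accepted, and the copy size is taken from the ioctl encoding instead of the static table. A condensed sketch of that control flow (illustrative, not verbatim):

if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
	long ret = drm_ioctl_permit(ioctl->flags, file_priv);

	if (ret)
		return ret;
	if ((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)
		return -EINVAL;		/* malformed encoding */
	return vmw_execbuf_ioctl(dev, arg, file_priv, _IOC_SIZE(cmd));
}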
1073static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1150static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1163,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1086 1163
1087static void vmw_lastclose(struct drm_device *dev) 1164static void vmw_lastclose(struct drm_device *dev)
1088{ 1165{
1089 struct drm_crtc *crtc;
1090 struct drm_mode_set set;
1091 int ret;
1092
1093 set.x = 0;
1094 set.y = 0;
1095 set.fb = NULL;
1096 set.mode = NULL;
1097 set.connectors = NULL;
1098 set.num_connectors = 0;
1099
1100 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1101 set.crtc = crtc;
1102 ret = drm_mode_set_config_internal(&set);
1103 WARN_ON(ret != 0);
1104 }
1105
1106} 1166}
1107 1167
1108static void vmw_master_init(struct vmw_master *vmaster) 1168static void vmw_master_init(struct vmw_master *vmaster)
1109{ 1169{
1110 ttm_lock_init(&vmaster->lock); 1170 ttm_lock_init(&vmaster->lock);
1111 INIT_LIST_HEAD(&vmaster->fb_surf);
1112 mutex_init(&vmaster->fb_surf_mutex);
1113} 1171}
1114 1172
1115static int vmw_master_create(struct drm_device *dev, 1173static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev,
1137 kfree(vmaster); 1195 kfree(vmaster);
1138} 1196}
1139 1197
1140
1141static int vmw_master_set(struct drm_device *dev, 1198static int vmw_master_set(struct drm_device *dev,
1142 struct drm_file *file_priv, 1199 struct drm_file *file_priv,
1143 bool from_open) 1200 bool from_open)
@@ -1148,27 +1205,13 @@ static int vmw_master_set(struct drm_device *dev,
1148 struct vmw_master *vmaster = vmw_master(file_priv->master); 1205 struct vmw_master *vmaster = vmw_master(file_priv->master);
1149 int ret = 0; 1206 int ret = 0;
1150 1207
1151 if (!dev_priv->enable_fb) {
1152 ret = vmw_3d_resource_inc(dev_priv, true);
1153 if (unlikely(ret != 0))
1154 return ret;
1155 vmw_kms_save_vga(dev_priv);
1156 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
1157 }
1158
1159 if (active) { 1208 if (active) {
1160 BUG_ON(active != &dev_priv->fbdev_master); 1209 BUG_ON(active != &dev_priv->fbdev_master);
1161 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 1210 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
1162 if (unlikely(ret != 0)) 1211 if (unlikely(ret != 0))
1163 goto out_no_active_lock;
1212 return ret;
1164 1213
1165 ttm_lock_set_kill(&active->lock, true, SIGTERM); 1214 ttm_lock_set_kill(&active->lock, true, SIGTERM);
1166 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
1167 if (unlikely(ret != 0)) {
1168 DRM_ERROR("Unable to clean VRAM on "
1169 "master drop.\n");
1170 }
1171
1172 dev_priv->active_master = NULL; 1215 dev_priv->active_master = NULL;
1173 } 1216 }
1174 1217
@@ -1182,14 +1225,6 @@ static int vmw_master_set(struct drm_device *dev,
1182 dev_priv->active_master = vmaster; 1225 dev_priv->active_master = vmaster;
1183 1226
1184 return 0; 1227 return 0;
1185
1186out_no_active_lock:
1187 if (!dev_priv->enable_fb) {
1188 vmw_kms_restore_vga(dev_priv);
1189 vmw_3d_resource_dec(dev_priv, true);
1190 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1191 }
1192 return ret;
1193} 1228}
1194 1229
1195static void vmw_master_drop(struct drm_device *dev, 1230static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1249,9 @@ static void vmw_master_drop(struct drm_device *dev,
1214 } 1249 }
1215 1250
1216 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1251 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
1217 vmw_execbuf_release_pinned_bo(dev_priv);
1218 1252
1219 if (!dev_priv->enable_fb) {
1220 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
1221 if (unlikely(ret != 0))
1222 DRM_ERROR("Unable to clean VRAM on master drop.\n");
1223 vmw_kms_restore_vga(dev_priv);
1224 vmw_3d_resource_dec(dev_priv, true);
1225 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
1226 }
1253 if (!dev_priv->enable_fb)
1254 vmw_svga_disable(dev_priv);
1227 1255
1228 dev_priv->active_master = &dev_priv->fbdev_master; 1256 dev_priv->active_master = &dev_priv->fbdev_master;
1229 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1257 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1261,76 @@ static void vmw_master_drop(struct drm_device *dev,
1233 vmw_fb_on(dev_priv); 1261 vmw_fb_on(dev_priv);
1234} 1262}
1235 1263
1264/**
1265 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1266 *
1267 * @dev_priv: Pointer to device private struct.
1268 * Needs the reservation sem to be held in non-exclusive mode.
1269 */
1270static void __vmw_svga_enable(struct vmw_private *dev_priv)
1271{
1272 spin_lock(&dev_priv->svga_lock);
1273 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1274 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1275 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1276 }
1277 spin_unlock(&dev_priv->svga_lock);
1278}
1279
1280/**
1281 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1282 *
1283 * @dev_priv: Pointer to device private struct.
1284 */
1285void vmw_svga_enable(struct vmw_private *dev_priv)
1286{
1287 ttm_read_lock(&dev_priv->reservation_sem, false);
1288 __vmw_svga_enable(dev_priv);
1289 ttm_read_unlock(&dev_priv->reservation_sem);
1290}
1291
1292/**
1293 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1294 *
1295 * @dev_priv: Pointer to device private struct.
1296 * Needs the reservation sem to be held in exclusive mode.
1297 * Will not empty VRAM. VRAM must be emptied by caller.
1298 */
1299static void __vmw_svga_disable(struct vmw_private *dev_priv)
1300{
1301 spin_lock(&dev_priv->svga_lock);
1302 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1303 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1304 vmw_write(dev_priv, SVGA_REG_ENABLE,
1305 SVGA_REG_ENABLE_HIDE |
1306 SVGA_REG_ENABLE_ENABLE);
1307 }
1308 spin_unlock(&dev_priv->svga_lock);
1309}
1310
1311/**
1312 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
1313 * running.
1314 *
1315 * @dev_priv: Pointer to device private struct.
1316 * Will empty VRAM.
1317 */
1318void vmw_svga_disable(struct vmw_private *dev_priv)
1319{
1320 ttm_write_lock(&dev_priv->reservation_sem, false);
1321 spin_lock(&dev_priv->svga_lock);
1322 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1323 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1324 spin_unlock(&dev_priv->svga_lock);
1325 if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1326 DRM_ERROR("Failed evicting VRAM buffers.\n");
1327 vmw_write(dev_priv, SVGA_REG_ENABLE,
1328 SVGA_REG_ENABLE_HIDE |
1329 SVGA_REG_ENABLE_ENABLE);
1330 } else
1331 spin_unlock(&dev_priv->svga_lock);
1332 ttm_write_unlock(&dev_priv->reservation_sem);
1333}
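Two details worth noting in the pair of functions above: "disable" never clears the device's enable bit, it only sets SVGA_REG_ENABLE_HIDE and stops VRAM use, so the FIFO keeps running; and vmw_svga_disable() drops svga_lock before ttm_bo_evict_mm() because eviction can sleep. A hypothetical helper making the register states explicit (bit names from the device headers; a sketch, not the driver's own code):

static void vmw_svga_set_hidden(struct vmw_private *dev_priv, bool hidden)
{
	u32 val = SVGA_REG_ENABLE_ENABLE;	/* device and FIFO stay on */

	if (hidden)
		val |= SVGA_REG_ENABLE_HIDE;	/* suppress scanout only */
	vmw_write(dev_priv, SVGA_REG_ENABLE, val);
}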
1236 1334
1237static void vmw_remove(struct pci_dev *pdev) 1335static void vmw_remove(struct pci_dev *pdev)
1238{ 1336{
@@ -1250,23 +1348,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1250 1348
1251 switch (val) { 1349 switch (val) {
1252 case PM_HIBERNATION_PREPARE: 1350 case PM_HIBERNATION_PREPARE:
1253 case PM_SUSPEND_PREPARE:
1351 if (dev_priv->enable_fb)
1352 vmw_fb_off(dev_priv);
1254 ttm_suspend_lock(&dev_priv->reservation_sem); 1353 ttm_suspend_lock(&dev_priv->reservation_sem);
1255 1354
1256 /**
1355 /*
1257 * This empties VRAM and unbinds all GMR bindings. 1356 * This empties VRAM and unbinds all GMR bindings.
1258 * Buffer contents is moved to swappable memory. 1357 * Buffer contents is moved to swappable memory.
1259 */ 1358 */
1260 vmw_execbuf_release_pinned_bo(dev_priv); 1359 vmw_execbuf_release_pinned_bo(dev_priv);
1261 vmw_resource_evict_all(dev_priv); 1360 vmw_resource_evict_all(dev_priv);
1361 vmw_release_device_early(dev_priv);
1262 ttm_bo_swapout_all(&dev_priv->bdev); 1362 ttm_bo_swapout_all(&dev_priv->bdev);
1263
1363 vmw_fence_fifo_down(dev_priv->fman);
1264 break; 1364 break;
1265 case PM_POST_HIBERNATION: 1365 case PM_POST_HIBERNATION:
1266 case PM_POST_SUSPEND:
1267 case PM_POST_RESTORE: 1366 case PM_POST_RESTORE:
1367 vmw_fence_fifo_up(dev_priv->fman);
1268 ttm_suspend_unlock(&dev_priv->reservation_sem); 1368 ttm_suspend_unlock(&dev_priv->reservation_sem);
1269
1369 if (dev_priv->enable_fb)
1370 vmw_fb_on(dev_priv);
1270 break; 1371 break;
1271 case PM_RESTORE_PREPARE: 1372 case PM_RESTORE_PREPARE:
1272 break; 1373 break;
@@ -1276,20 +1377,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1276 return 0; 1377 return 0;
1277} 1378}
1278 1379
1279/**
1280 * These might not be needed with the virtual SVGA device.
1281 */
1282
1283static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1380static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1284{ 1381{
1285 struct drm_device *dev = pci_get_drvdata(pdev); 1382 struct drm_device *dev = pci_get_drvdata(pdev);
1286 struct vmw_private *dev_priv = vmw_priv(dev); 1383 struct vmw_private *dev_priv = vmw_priv(dev);
1287 1384
1288 if (dev_priv->num_3d_resources != 0) {
1289 DRM_INFO("Can't suspend or hibernate "
1290 "while 3D resources are active.\n");
1385 if (dev_priv->refuse_hibernation)
1291 return -EBUSY; 1386 return -EBUSY;
1292 }
1293 1387
1294 pci_save_state(pdev); 1388 pci_save_state(pdev);
1295 pci_disable_device(pdev); 1389 pci_disable_device(pdev);
@@ -1321,56 +1415,62 @@ static int vmw_pm_resume(struct device *kdev)
1321 return vmw_pci_resume(pdev); 1415 return vmw_pci_resume(pdev);
1322} 1416}
1323 1417
1324static int vmw_pm_prepare(struct device *kdev)
1418static int vmw_pm_freeze(struct device *kdev)
1325{ 1419{
1326 struct pci_dev *pdev = to_pci_dev(kdev); 1420 struct pci_dev *pdev = to_pci_dev(kdev);
1327 struct drm_device *dev = pci_get_drvdata(pdev); 1421 struct drm_device *dev = pci_get_drvdata(pdev);
1328 struct vmw_private *dev_priv = vmw_priv(dev); 1422 struct vmw_private *dev_priv = vmw_priv(dev);
1329 1423
1330 /**
1331 * Release 3d reference held by fbdev and potentially
1332 * stop fifo.
1333 */
1334 dev_priv->suspended = true; 1424 dev_priv->suspended = true;
1335 if (dev_priv->enable_fb) 1425 if (dev_priv->enable_fb)
1336 vmw_3d_resource_dec(dev_priv, true);
1426 vmw_fifo_resource_dec(dev_priv);
1337
1338 if (dev_priv->num_3d_resources != 0) {
1339
1340 DRM_INFO("Can't suspend or hibernate "
1341 "while 3D resources are active.\n");
1342 1427
1428 if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1429 DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1343 if (dev_priv->enable_fb) 1430 if (dev_priv->enable_fb)
1344 vmw_3d_resource_inc(dev_priv, true);
1431 vmw_fifo_resource_inc(dev_priv);
1432 WARN_ON(vmw_request_device_late(dev_priv));
1345 dev_priv->suspended = false; 1433 dev_priv->suspended = false;
1346 return -EBUSY; 1434 return -EBUSY;
1347 } 1435 }
1348 1436
1437 if (dev_priv->enable_fb)
1438 __vmw_svga_disable(dev_priv);
1439
1440 vmw_release_device_late(dev_priv);
1441
1349 return 0; 1442 return 0;
1350} 1443}
1351 1444
1352static void vmw_pm_complete(struct device *kdev)
1445static int vmw_pm_restore(struct device *kdev)
1353{ 1446{
1354 struct pci_dev *pdev = to_pci_dev(kdev); 1447 struct pci_dev *pdev = to_pci_dev(kdev);
1355 struct drm_device *dev = pci_get_drvdata(pdev); 1448 struct drm_device *dev = pci_get_drvdata(pdev);
1356 struct vmw_private *dev_priv = vmw_priv(dev); 1449 struct vmw_private *dev_priv = vmw_priv(dev);
1450 int ret;
1357 1451
1358 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1452 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1359 (void) vmw_read(dev_priv, SVGA_REG_ID); 1453 (void) vmw_read(dev_priv, SVGA_REG_ID);
1360 1454
1361 /**
1362 * Reclaim 3d reference held by fbdev and potentially
1363 * start fifo.
1364 */
1365 if (dev_priv->enable_fb) 1455 if (dev_priv->enable_fb)
1366 vmw_3d_resource_inc(dev_priv, false);
1456 vmw_fifo_resource_inc(dev_priv);
1457
1458 ret = vmw_request_device(dev_priv);
1459 if (ret)
1460 return ret;
1461
1462 if (dev_priv->enable_fb)
1463 __vmw_svga_enable(dev_priv);
1367 1464
1368 dev_priv->suspended = false; 1465 dev_priv->suspended = false;
1466
1467 return 0;
1369} 1468}
1370 1469
1371static const struct dev_pm_ops vmw_pm_ops = { 1470static const struct dev_pm_ops vmw_pm_ops = {
1372 .prepare = vmw_pm_prepare,
1373 .complete = vmw_pm_complete,
1471 .freeze = vmw_pm_freeze,
1472 .thaw = vmw_pm_restore,
1473 .restore = vmw_pm_restore,
1374 .suspend = vmw_pm_suspend, 1474 .suspend = vmw_pm_suspend,
1375 .resume = vmw_pm_resume, 1475 .resume = vmw_pm_resume,
1376}; 1476};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d26a6daa9719..8f40692cf48a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
40#include <drm/ttm/ttm_module.h> 40#include <drm/ttm/ttm_module.h>
41#include "vmwgfx_fence.h" 41#include "vmwgfx_fence.h"
42 42
43#define VMWGFX_DRIVER_DATE "20140704"
43#define VMWGFX_DRIVER_DATE "20150810"
44#define VMWGFX_DRIVER_MAJOR 2 44#define VMWGFX_DRIVER_MAJOR 2
45#define VMWGFX_DRIVER_MINOR 6
46#define VMWGFX_DRIVER_PATCHLEVEL 1
45#define VMWGFX_DRIVER_MINOR 9
46#define VMWGFX_DRIVER_PATCHLEVEL 0
47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49#define VMWGFX_MAX_RELOCATIONS 2048 49#define VMWGFX_MAX_RELOCATIONS 2048
50#define VMWGFX_MAX_VALIDATIONS 2048 50#define VMWGFX_MAX_VALIDATIONS 2048
51#define VMWGFX_MAX_DISPLAYS 16 51#define VMWGFX_MAX_DISPLAYS 16
52#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 52#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
53#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
53#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
54 54
55/* 55/*
56 * Perhaps we should have sysfs entries for these. 56 * Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
59#define VMWGFX_NUM_GB_SHADER 20000 59#define VMWGFX_NUM_GB_SHADER 20000
60#define VMWGFX_NUM_GB_SURFACE 32768 60#define VMWGFX_NUM_GB_SURFACE 32768
61#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS 61#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
62#define VMWGFX_NUM_DXCONTEXT 256
63#define VMWGFX_NUM_DXQUERY 512
62#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ 64#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
63 VMWGFX_NUM_GB_SHADER +\ 65 VMWGFX_NUM_GB_SHADER +\
64 VMWGFX_NUM_GB_SURFACE +\ 66 VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@ struct vmw_fpriv {
85struct vmw_dma_buffer { 87struct vmw_dma_buffer {
86 struct ttm_buffer_object base; 88 struct ttm_buffer_object base;
87 struct list_head res_list; 89 struct list_head res_list;
90 s32 pin_count;
91 /* Not ref-counted. Protected by binding_mutex */
92 struct vmw_resource *dx_query_ctx;
88}; 93};
89 94
90/** 95/**
@@ -113,6 +118,7 @@ struct vmw_resource {
113 bool backup_dirty; /* Protected by backup buffer reserved */ 118 bool backup_dirty; /* Protected by backup buffer reserved */
114 struct vmw_dma_buffer *backup; 119 struct vmw_dma_buffer *backup;
115 unsigned long backup_offset; 120 unsigned long backup_offset;
121 unsigned long pin_count; /* Protected by resource reserved */
116 const struct vmw_res_func *func; 122 const struct vmw_res_func *func;
117 struct list_head lru_head; /* Protected by the resource lock */ 123 struct list_head lru_head; /* Protected by the resource lock */
118 struct list_head mob_head; /* Protected by @backup reserved */ 124 struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@ enum vmw_res_type {
130 vmw_res_surface, 136 vmw_res_surface,
131 vmw_res_stream, 137 vmw_res_stream,
132 vmw_res_shader, 138 vmw_res_shader,
139 vmw_res_dx_context,
140 vmw_res_cotable,
141 vmw_res_view,
133 vmw_res_max 142 vmw_res_max
134}; 143};
135 144
@@ -137,7 +146,8 @@ enum vmw_res_type {
137 * Resources that are managed using command streams. 146 * Resources that are managed using command streams.
138 */ 147 */
139enum vmw_cmdbuf_res_type { 148enum vmw_cmdbuf_res_type {
140 vmw_cmdbuf_res_compat_shader
149 vmw_cmdbuf_res_shader,
150 vmw_cmdbuf_res_view
141}; 151};
142 152
143struct vmw_cmdbuf_res_manager; 153struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
160 struct drm_vmw_size *sizes; 170 struct drm_vmw_size *sizes;
161 uint32_t num_sizes; 171 uint32_t num_sizes;
162 bool scanout; 172 bool scanout;
173 uint32_t array_size;
163 /* TODO so far just a extra pointer */ 174 /* TODO so far just a extra pointer */
164 struct vmw_cursor_snooper snooper; 175 struct vmw_cursor_snooper snooper;
165 struct vmw_surface_offset *offsets; 176 struct vmw_surface_offset *offsets;
166 SVGA3dTextureFilter autogen_filter; 177 SVGA3dTextureFilter autogen_filter;
167 uint32_t multisample_count; 178 uint32_t multisample_count;
179 struct list_head view_list;
168}; 180};
169 181
170struct vmw_marker_queue { 182struct vmw_marker_queue {
@@ -176,14 +188,15 @@ struct vmw_marker_queue {
176 188
177struct vmw_fifo_state { 189struct vmw_fifo_state {
178 unsigned long reserved_size; 190 unsigned long reserved_size;
179 __le32 *dynamic_buffer;
180 __le32 *static_buffer;
191 u32 *dynamic_buffer;
192 u32 *static_buffer;
181 unsigned long static_buffer_size; 193 unsigned long static_buffer_size;
182 bool using_bounce_buffer; 194 bool using_bounce_buffer;
183 uint32_t capabilities; 195 uint32_t capabilities;
184 struct mutex fifo_mutex; 196 struct mutex fifo_mutex;
185 struct rw_semaphore rwsem; 197 struct rw_semaphore rwsem;
186 struct vmw_marker_queue marker_queue; 198 struct vmw_marker_queue marker_queue;
199 bool dx;
187}; 200};
188 201
189struct vmw_relocation { 202struct vmw_relocation {
@@ -264,70 +277,15 @@ struct vmw_piter {
264}; 277};
265 278
266/* 279/*
267 * enum vmw_ctx_binding_type - abstract resource to context binding types
280 * enum vmw_display_unit_type - Describes the display unit
268 */ 281 */
269enum vmw_ctx_binding_type {
270 vmw_ctx_binding_shader,
271 vmw_ctx_binding_rt,
272 vmw_ctx_binding_tex,
273 vmw_ctx_binding_max
282enum vmw_display_unit_type {
283 vmw_du_invalid = 0,
284 vmw_du_legacy,
285 vmw_du_screen_object,
286 vmw_du_screen_target
274}; 287};
275 288
276/**
277 * struct vmw_ctx_bindinfo - structure representing a single context binding
278 *
279 * @ctx: Pointer to the context structure. NULL means the binding is not
280 * active.
281 * @res: Non ref-counted pointer to the bound resource.
282 * @bt: The binding type.
283 * @i1: Union of information needed to unbind.
284 */
285struct vmw_ctx_bindinfo {
286 struct vmw_resource *ctx;
287 struct vmw_resource *res;
288 enum vmw_ctx_binding_type bt;
289 bool scrubbed;
290 union {
291 SVGA3dShaderType shader_type;
292 SVGA3dRenderTargetType rt_type;
293 uint32 texture_stage;
294 } i1;
295};
296
297/**
298 * struct vmw_ctx_binding - structure representing a single context binding
299 * - suitable for tracking in a context
300 *
301 * @ctx_list: List head for context.
302 * @res_list: List head for bound resource.
303 * @bi: Binding info
304 */
305struct vmw_ctx_binding {
306 struct list_head ctx_list;
307 struct list_head res_list;
308 struct vmw_ctx_bindinfo bi;
309};
310
311
312/**
313 * struct vmw_ctx_binding_state - context binding state
314 *
315 * @list: linked list of individual bindings.
316 * @render_targets: Render target bindings.
317 * @texture_units: Texture units/samplers bindings.
318 * @shaders: Shader bindings.
319 *
320 * Note that this structure also provides storage space for the individual
321 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
322 * for individual bindings.
323 *
324 */
325struct vmw_ctx_binding_state {
326 struct list_head list;
327 struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
328 struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
329 struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
330};
331 289
332struct vmw_sw_context{ 290struct vmw_sw_context{
333 struct drm_open_hash res_ht; 291 struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
342 uint32_t *cmd_bounce; 300 uint32_t *cmd_bounce;
343 uint32_t cmd_bounce_size; 301 uint32_t cmd_bounce_size;
344 struct list_head resource_list; 302 struct list_head resource_list;
345 struct ttm_buffer_object *cur_query_bo;
303 struct list_head ctx_resource_list; /* For contexts and cotables */
304 struct vmw_dma_buffer *cur_query_bo;
346 struct list_head res_relocations; 305 struct list_head res_relocations;
347 uint32_t *buf_start; 306 uint32_t *buf_start;
348 struct vmw_res_cache_entry res_cache[vmw_res_max]; 307 struct vmw_res_cache_entry res_cache[vmw_res_max];
349 struct vmw_resource *last_query_ctx; 308 struct vmw_resource *last_query_ctx;
350 bool needs_post_query_barrier; 309 bool needs_post_query_barrier;
351 struct vmw_resource *error_resource; 310 struct vmw_resource *error_resource;
352 struct vmw_ctx_binding_state staged_bindings;
311 struct vmw_ctx_binding_state *staged_bindings;
312 bool staged_bindings_inuse;
353 struct list_head staged_cmd_res; 313 struct list_head staged_cmd_res;
314 struct vmw_resource_val_node *dx_ctx_node;
315 struct vmw_dma_buffer *dx_query_mob;
316 struct vmw_resource *dx_query_ctx;
317 struct vmw_cmdbuf_res_manager *man;
354}; 318};
355 319
356struct vmw_legacy_display; 320struct vmw_legacy_display;
@@ -358,8 +322,6 @@ struct vmw_overlay;
358 322
359struct vmw_master { 323struct vmw_master {
360 struct ttm_lock lock; 324 struct ttm_lock lock;
361 struct mutex fb_surf_mutex;
362 struct list_head fb_surf;
363}; 325};
364 326
365struct vmw_vga_topology_state { 327struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
370 uint32_t pos_y; 332 uint32_t pos_y;
371}; 333};
372 334
335
336/*
337 * struct vmw_otable - Guest Memory OBject table metadata
338 *
339 * @size: Size of the table (page-aligned).
340 * @page_table: Pointer to a struct vmw_mob holding the page table.
341 */
342struct vmw_otable {
343 unsigned long size;
344 struct vmw_mob *page_table;
345 bool enabled;
346};
347
348struct vmw_otable_batch {
349 unsigned num_otables;
350 struct vmw_otable *otables;
351 struct vmw_resource *context;
352 struct ttm_buffer_object *otable_bo;
353};
354
373struct vmw_private { 355struct vmw_private {
374 struct ttm_bo_device bdev; 356 struct ttm_bo_device bdev;
375 struct ttm_bo_global_ref bo_global_ref; 357 struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@ struct vmw_private {
387 uint32_t mmio_size; 369 uint32_t mmio_size;
388 uint32_t fb_max_width; 370 uint32_t fb_max_width;
389 uint32_t fb_max_height; 371 uint32_t fb_max_height;
372 uint32_t texture_max_width;
373 uint32_t texture_max_height;
374 uint32_t stdu_max_width;
375 uint32_t stdu_max_height;
390 uint32_t initial_width; 376 uint32_t initial_width;
391 uint32_t initial_height; 377 uint32_t initial_height;
392 __le32 __iomem *mmio_virt;
378 u32 __iomem *mmio_virt;
393 int mmio_mtrr; 379 int mmio_mtrr;
394 uint32_t capabilities; 380 uint32_t capabilities;
395 uint32_t max_gmr_ids; 381 uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@ struct vmw_private {
401 bool has_mob; 387 bool has_mob;
402 spinlock_t hw_lock; 388 spinlock_t hw_lock;
403 spinlock_t cap_lock; 389 spinlock_t cap_lock;
390 bool has_dx;
404 391
405 /* 392 /*
406 * VGA registers. 393 * VGA registers.
@@ -420,6 +407,7 @@ struct vmw_private {
420 */ 407 */
421 408
422 void *fb_info; 409 void *fb_info;
410 enum vmw_display_unit_type active_display_unit;
423 struct vmw_legacy_display *ldu_priv; 411 struct vmw_legacy_display *ldu_priv;
424 struct vmw_screen_object_display *sou_priv; 412 struct vmw_screen_object_display *sou_priv;
425 struct vmw_overlay *overlay_priv; 413 struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@ struct vmw_private {
453 spinlock_t waiter_lock; 441 spinlock_t waiter_lock;
454 int fence_queue_waiters; /* Protected by waiter_lock */ 442 int fence_queue_waiters; /* Protected by waiter_lock */
455 int goal_queue_waiters; /* Protected by waiter_lock */ 443 int goal_queue_waiters; /* Protected by waiter_lock */
444 int cmdbuf_waiters; /* Protected by irq_lock */
445 int error_waiters; /* Protected by irq_lock */
456 atomic_t fifo_queue_waiters; 446 atomic_t fifo_queue_waiters;
457 uint32_t last_read_seqno; 447 uint32_t last_read_seqno;
458 spinlock_t irq_lock; 448 spinlock_t irq_lock;
@@ -484,6 +474,7 @@ struct vmw_private {
484 474
485 bool stealth; 475 bool stealth;
486 bool enable_fb; 476 bool enable_fb;
477 spinlock_t svga_lock;
487 478
488 /** 479 /**
489 * Master management. 480 * Master management.
@@ -493,9 +484,10 @@ struct vmw_private {
493 struct vmw_master fbdev_master; 484 struct vmw_master fbdev_master;
494 struct notifier_block pm_nb; 485 struct notifier_block pm_nb;
495 bool suspended; 486 bool suspended;
487 bool refuse_hibernation;
496 488
497 struct mutex release_mutex; 489 struct mutex release_mutex;
498 uint32_t num_3d_resources;
490 atomic_t num_fifo_resources;
499 491
500 /* 492 /*
501 * Replace this with an rwsem as soon as we have down_xx_interruptible() 493 * Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@ struct vmw_private {
507 * are protected by the cmdbuf mutex. 499 * are protected by the cmdbuf mutex.
508 */ 500 */
509 501
510 struct ttm_buffer_object *dummy_query_bo;
511 struct ttm_buffer_object *pinned_bo;
502 struct vmw_dma_buffer *dummy_query_bo;
503 struct vmw_dma_buffer *pinned_bo;
512 uint32_t query_cid; 504 uint32_t query_cid;
513 uint32_t query_cid_valid; 505 uint32_t query_cid_valid;
514 bool dummy_query_bo_pinned; 506 bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@ struct vmw_private {
531 /* 523 /*
532 * Guest Backed stuff 524 * Guest Backed stuff
533 */ 525 */
534 struct ttm_buffer_object *otable_bo;
535 struct vmw_otable *otables;
526 struct vmw_otable_batch otable_batch;
527
528 struct vmw_cmdbuf_man *cman;
536}; 529};
537 530
538static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 531static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
587 return val; 580 return val;
588} 581}
589 582
590int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
591void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
583extern void vmw_svga_enable(struct vmw_private *dev_priv);
584extern void vmw_svga_disable(struct vmw_private *dev_priv);
585
592 586
593/** 587/**
594 * GMR utilities - vmwgfx_gmr.c 588 * GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
610extern struct vmw_resource * 604extern struct vmw_resource *
611vmw_resource_reference_unless_doomed(struct vmw_resource *res); 605vmw_resource_reference_unless_doomed(struct vmw_resource *res);
612extern int vmw_resource_validate(struct vmw_resource *res); 606extern int vmw_resource_validate(struct vmw_resource *res);
613extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
607extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
608 bool no_backup);
614extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 609extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
615extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, 610extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
616 struct ttm_object_file *tfile, 611 struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
660 uint32_t *inout_id, 655 uint32_t *inout_id,
661 struct vmw_resource **out); 656 struct vmw_resource **out);
662extern void vmw_resource_unreserve(struct vmw_resource *res, 657extern void vmw_resource_unreserve(struct vmw_resource *res,
658 bool switch_backup,
663 struct vmw_dma_buffer *new_backup, 659 struct vmw_dma_buffer *new_backup,
664 unsigned long new_backup_offset); 660 unsigned long new_backup_offset);
665extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, 661extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
666 struct ttm_mem_reg *mem); 662 struct ttm_mem_reg *mem);
663extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
664 struct ttm_mem_reg *mem);
665extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
667extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, 666extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
668 struct vmw_fence_obj *fence); 667 struct vmw_fence_obj *fence);
669extern void vmw_resource_evict_all(struct vmw_private *dev_priv); 668extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
671/** 670/**
672 * DMA buffer helper routines - vmwgfx_dmabuf.c 671 * DMA buffer helper routines - vmwgfx_dmabuf.c
673 */ 672 */
674extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
675 struct vmw_dma_buffer *bo,
676 struct ttm_placement *placement,
677 bool interruptible);
678extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
679 struct vmw_dma_buffer *buf,
680 bool pin, bool interruptible);
681extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
682 struct vmw_dma_buffer *buf,
683 bool pin, bool interruptible);
684extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
685 struct vmw_dma_buffer *bo,
686 bool pin, bool interruptible);
673extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
674 struct vmw_dma_buffer *bo,
675 struct ttm_placement *placement,
676 bool interruptible);
677extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
678 struct vmw_dma_buffer *buf,
679 bool interruptible);
680extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
681 struct vmw_dma_buffer *buf,
682 bool interruptible);
683extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
684 struct vmw_dma_buffer *bo,
685 bool interruptible);
687extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, 686extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
688 struct vmw_dma_buffer *bo, 687 struct vmw_dma_buffer *bo,
689 bool interruptible); 688 bool interruptible);
690extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, 689extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
691 SVGAGuestPtr *ptr); 690 SVGAGuestPtr *ptr);
692extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
691extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
693 692
694/** 693/**
695 * Misc Ioctl functionality - vmwgfx_ioctl.c 694 * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
717extern void vmw_fifo_release(struct vmw_private *dev_priv, 716extern void vmw_fifo_release(struct vmw_private *dev_priv,
718 struct vmw_fifo_state *fifo); 717 struct vmw_fifo_state *fifo);
719extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); 718extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
719extern void *
720vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
720extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); 721extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
722extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
721extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, 723extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
722 uint32_t *seqno); 724 uint32_t *seqno);
723extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); 725extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
726extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); 728extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
727extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, 729extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
728 uint32_t cid); 730 uint32_t cid);
731extern int vmw_fifo_flush(struct vmw_private *dev_priv,
732 bool interruptible);
729 733
730/** 734/**
731 * TTM glue - vmwgfx_ttm_glue.c 735 * TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
750extern struct ttm_placement vmw_evictable_placement; 754extern struct ttm_placement vmw_evictable_placement;
751extern struct ttm_placement vmw_srf_placement; 755extern struct ttm_placement vmw_srf_placement;
752extern struct ttm_placement vmw_mob_placement; 756extern struct ttm_placement vmw_mob_placement;
757extern struct ttm_placement vmw_mob_ne_placement;
753extern struct ttm_bo_driver vmw_bo_driver; 758extern struct ttm_bo_driver vmw_bo_driver;
754extern int vmw_dma_quiescent(struct drm_device *dev); 759extern int vmw_dma_quiescent(struct drm_device *dev);
755extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); 760extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
800 * Command submission - vmwgfx_execbuf.c 805 * Command submission - vmwgfx_execbuf.c
801 */ 806 */
802 807
803extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
804 struct drm_file *file_priv);
808extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
809 struct drm_file *file_priv, size_t size);
805extern int vmw_execbuf_process(struct drm_file *file_priv, 810extern int vmw_execbuf_process(struct drm_file *file_priv,
806 struct vmw_private *dev_priv, 811 struct vmw_private *dev_priv,
807 void __user *user_commands, 812 void __user *user_commands,
808 void *kernel_commands, 813 void *kernel_commands,
809 uint32_t command_size, 814 uint32_t command_size,
810 uint64_t throttle_us, 815 uint64_t throttle_us,
816 uint32_t dx_context_handle,
811 struct drm_vmw_fence_rep __user 817 struct drm_vmw_fence_rep __user
812 *user_fence_rep, 818 *user_fence_rep,
813 struct vmw_fence_obj **out_fence); 819 struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
826 *user_fence_rep, 832 *user_fence_rep,
827 struct vmw_fence_obj *fence, 833 struct vmw_fence_obj *fence,
828 uint32_t fence_handle); 834 uint32_t fence_handle);
835extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
836 struct ttm_buffer_object *bo,
837 bool interruptible,
838 bool validate_as_mob);
839
829 840
830/** 841/**
831 * IRQs and wating - vmwgfx_irq.c 842 * IRQs and wating - vmwgfx_irq.c
@@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
833 844
834extern irqreturn_t vmw_irq_handler(int irq, void *arg); 845extern irqreturn_t vmw_irq_handler(int irq, void *arg);
835extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, 846extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
836 uint32_t seqno, bool interruptible, 847 uint32_t seqno, bool interruptible,
837 unsigned long timeout); 848 unsigned long timeout);
838extern void vmw_irq_preinstall(struct drm_device *dev); 849extern void vmw_irq_preinstall(struct drm_device *dev);
839extern int vmw_irq_postinstall(struct drm_device *dev); 850extern int vmw_irq_postinstall(struct drm_device *dev);
840extern void vmw_irq_uninstall(struct drm_device *dev); 851extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
852extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); 863extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
853extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); 864extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
854extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv); 865extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
866extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
867 int *waiter_count);
868extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
869 u32 flag, int *waiter_count);
855 870
856/** 871/**
857 * Rudimentary fence-like objects currently used only for throttling - 872 * Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
861extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); 876extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
862extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); 877extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
863extern int vmw_marker_push(struct vmw_marker_queue *queue, 878extern int vmw_marker_push(struct vmw_marker_queue *queue,
864 uint32_t seqno); 879 uint32_t seqno);
865extern int vmw_marker_pull(struct vmw_marker_queue *queue, 880extern int vmw_marker_pull(struct vmw_marker_queue *queue,
866 uint32_t signaled_seqno); 881 uint32_t signaled_seqno);
867extern int vmw_wait_lag(struct vmw_private *dev_priv, 882extern int vmw_wait_lag(struct vmw_private *dev_priv,
868 struct vmw_marker_queue *queue, uint32_t us); 883 struct vmw_marker_queue *queue, uint32_t us);
869 884
@@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
908 uint32_t sid, int32_t destX, int32_t destY, 923 uint32_t sid, int32_t destX, int32_t destY,
909 struct drm_vmw_rect *clips, 924 struct drm_vmw_rect *clips,
910 uint32_t num_clips); 925 uint32_t num_clips);
911int vmw_kms_readback(struct vmw_private *dev_priv,
912 struct drm_file *file_priv,
913 struct vmw_framebuffer *vfb,
914 struct drm_vmw_fence_rep __user *user_fence_rep,
915 struct drm_vmw_rect *clips,
916 uint32_t num_clips);
917int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
918 struct drm_file *file_priv); 927 struct drm_file *file_priv);
919 928
@@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
927int vmw_dumb_destroy(struct drm_file *file_priv, 936int vmw_dumb_destroy(struct drm_file *file_priv,
928 struct drm_device *dev, 937 struct drm_device *dev,
929 uint32_t handle); 938 uint32_t handle);
939extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
940extern void vmw_resource_unpin(struct vmw_resource *res);
941extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
942
930/** 943/**
931 * Overlay control - vmwgfx_overlay.c 944 * Overlay control - vmwgfx_overlay.c
932 */ 945 */
@@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
982 995
983extern const struct vmw_user_resource_conv *user_context_converter; 996extern const struct vmw_user_resource_conv *user_context_converter;
984 997
985extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
986
987extern int vmw_context_check(struct vmw_private *dev_priv, 998extern int vmw_context_check(struct vmw_private *dev_priv,
988 struct ttm_object_file *tfile, 999 struct ttm_object_file *tfile,
989 int id, 1000 int id,
990 struct vmw_resource **p_res); 1001 struct vmw_resource **p_res);
991extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 1002extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
992 struct drm_file *file_priv); 1003 struct drm_file *file_priv);
1004extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
1005 struct drm_file *file_priv);
993extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 1006extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
994 struct drm_file *file_priv); 1007 struct drm_file *file_priv);
995extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
996 const struct vmw_ctx_bindinfo *ci);
997extern void
998vmw_context_binding_state_transfer(struct vmw_resource *res,
999 struct vmw_ctx_binding_state *cbs);
1000extern void vmw_context_binding_res_list_kill(struct list_head *head);
1001extern void vmw_context_binding_res_list_scrub(struct list_head *head);
1002extern int vmw_context_rebind_all(struct vmw_resource *ctx);
1003extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 1008extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1004extern struct vmw_cmdbuf_res_manager * 1009extern struct vmw_cmdbuf_res_manager *
1005vmw_context_res_man(struct vmw_resource *ctx); 1010vmw_context_res_man(struct vmw_resource *ctx);
1011extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
1012 SVGACOTableType cotable_type);
1013extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1014struct vmw_ctx_binding_state;
1015extern struct vmw_ctx_binding_state *
1016vmw_context_binding_state(struct vmw_resource *ctx);
1017extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
1018 bool readback);
1019extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
1020 struct vmw_dma_buffer *mob);
1021extern struct vmw_dma_buffer *
1022vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
1023
1024
1006/* 1025/*
1007 * Surface management - vmwgfx_surface.c 1026 * Surface management - vmwgfx_surface.c
1008 */ 1027 */
@@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
1025 uint32_t handle, int *id); 1044 uint32_t handle, int *id);
1026extern int vmw_surface_validate(struct vmw_private *dev_priv, 1045extern int vmw_surface_validate(struct vmw_private *dev_priv,
1027 struct vmw_surface *srf); 1046 struct vmw_surface *srf);
1047int vmw_surface_gb_priv_define(struct drm_device *dev,
1048 uint32_t user_accounting_size,
1049 uint32_t svga3d_flags,
1050 SVGA3dSurfaceFormat format,
1051 bool for_scanout,
1052 uint32_t num_mip_levels,
1053 uint32_t multisample_count,
1054 uint32_t array_size,
1055 struct drm_vmw_size size,
1056 struct vmw_surface **srf_out);
1028 1057
1029/* 1058/*
1030 * Shader management - vmwgfx_shader.c 1059 * Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1042 SVGA3dShaderType shader_type, 1071 SVGA3dShaderType shader_type,
1043 size_t size, 1072 size_t size,
1044 struct list_head *list); 1073 struct list_head *list);
1045extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
1074extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
1046 u32 user_key, SVGA3dShaderType shader_type, 1075 u32 user_key, SVGA3dShaderType shader_type,
1047 struct list_head *list); 1076 struct list_head *list);
1077extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
1078 struct vmw_resource *ctx,
1079 u32 user_key,
1080 SVGA3dShaderType shader_type,
1081 struct list_head *list);
1082extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
1083 struct list_head *list,
1084 bool readback);
1085
1048extern struct vmw_resource * 1086extern struct vmw_resource *
1049vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1087vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1050 u32 user_key, SVGA3dShaderType shader_type); 1088 u32 user_key, SVGA3dShaderType shader_type);
1051 1089
1052/* 1090/*
1053 * Command buffer managed resources - vmwgfx_cmdbuf_res.c 1091 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1071extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, 1109extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1072 enum vmw_cmdbuf_res_type res_type, 1110 enum vmw_cmdbuf_res_type res_type,
1073 u32 user_key, 1111 u32 user_key,
1074 struct list_head *list);
1112 struct list_head *list,
1113 struct vmw_resource **res);
1114
1115/*
1116 * COTable management - vmwgfx_cotable.c
1117 */
1118extern const SVGACOTableType vmw_cotable_scrub_order[];
1119extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
1120 struct vmw_resource *ctx,
1121 u32 type);
1122extern int vmw_cotable_notify(struct vmw_resource *res, int id);
1123extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
1124extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
1125 struct list_head *head);
1126
1127/*
1128 * Command buffer management - vmwgfx_cmdbuf.c
1129 */
1130struct vmw_cmdbuf_man;
1131struct vmw_cmdbuf_header;
1132
1133extern struct vmw_cmdbuf_man *
1134vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
1135extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1136 size_t size, size_t default_size);
1137extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
1138extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
1139extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
1140 unsigned long timeout);
1141extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1142 int ctx_id, bool interruptible,
1143 struct vmw_cmdbuf_header *header);
1144extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1145 struct vmw_cmdbuf_header *header,
1146 bool flush);
1147extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
1148extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
1149 size_t size, bool interruptible,
1150 struct vmw_cmdbuf_header **p_header);
1151extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
1152extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
1153 bool interruptible);
1075 1154
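The prototypes above are the entire public surface of the new register-driven command buffer manager (vmwgfx_cmdbuf.c). A hypothetical end-to-end lifecycle under these signatures (the pool sizes, timeout, and IS_ERR() return conventions are assumptions for illustration, not taken from the driver):

static int example_cmdbuf_roundtrip(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_header *header;
	struct vmw_cmdbuf_man *man;
	void *cmd;
	int ret;

	man = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);

	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024, 4096);
	if (ret)
		goto out_destroy;

	cmd = vmw_cmdbuf_alloc(man, 128, true, &header);
	if (!IS_ERR_OR_NULL(cmd)) {
		/* ... encode up to 128 bytes of SVGA commands at cmd ... */
		vmw_cmdbuf_commit(man, 128, header, true);
	}

	ret = vmw_cmdbuf_idle(man, true, 10 * HZ);	/* wait for completion */
	vmw_cmdbuf_remove_pool(man);
out_destroy:
	vmw_cmdbuf_man_destroy(man);
	return ret;
}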
1076 1155
1077/** 1156/**
@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
1116{ 1195{
1117 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; 1196 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
1118} 1197}
1198
1199static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
1200{
1201 atomic_inc(&dev_priv->num_fifo_resources);
1202}
1203
1204static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
1205{
1206 atomic_dec(&dev_priv->num_fifo_resources);
1207}
1119#endif 1208#endif
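These inlines complete the num_3d_resources to num_fifo_resources conversion seen throughout this diff: the count becomes a bare atomic with no locking and no enable/disable side effects, which now happen explicitly via vmw_svga_enable()/vmw_svga_disable(). A sketch of the resulting caller pattern (compare vmw_pm_freeze() in vmwgfx_drv.c above; hypothetical function, explanatory comments):

static int example_try_quiesce(struct vmw_private *dev_priv)
{
	/* Lockless read; real state transitions are serialized elsewhere
	 * via reservation_sem and svga_lock. */
	if (atomic_read(&dev_priv->num_fifo_resources) != 0)
		return -EBUSY;	/* FIFO still pinned by 3D resources */

	vmw_svga_disable(dev_priv);
	return 0;
}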
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bcb99a7..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
3 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
29#include "vmwgfx_reg.h" 29#include "vmwgfx_reg.h"
30#include <drm/ttm/ttm_bo_api.h> 30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32#include "vmwgfx_so.h"
33#include "vmwgfx_binding.h"
32 34
33#define VMW_RES_HT_ORDER 12 35#define VMW_RES_HT_ORDER 12
34 36
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. 61 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
60 * @first_usage: Set to true the first time the resource is referenced in 62 * @first_usage: Set to true the first time the resource is referenced in
61 * the command stream. 63 * the command stream.
62 * @no_buffer_needed: Resources do not need to allocate buffer backup on
63 * reservation. The command stream will provide one.
64 * @switching_backup: The command stream provides a new backup buffer for a
65 * resource.
66 * @no_buffer_needed: Set when @switching_backup is true on the first buffer
67 * reference, so resource reservation does not need to allocate a backup
68 * buffer for the resource.
64 */ 69 */
65struct vmw_resource_val_node { 70struct vmw_resource_val_node {
66 struct list_head head; 71 struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
69 struct vmw_dma_buffer *new_backup; 74 struct vmw_dma_buffer *new_backup;
70 struct vmw_ctx_binding_state *staged_bindings; 75 struct vmw_ctx_binding_state *staged_bindings;
71 unsigned long new_backup_offset; 76 unsigned long new_backup_offset;
72 bool first_usage; 77 u32 first_usage : 1;
73 bool no_buffer_needed; 78 u32 switching_backup : 1;
79 u32 no_buffer_needed : 1;
74}; 80};
75 81
76/** 82/**
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
92 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ 98 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93 (_gb_disable), (_gb_enable)} 99 (_gb_disable), (_gb_enable)}
94 100
101static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
102 struct vmw_sw_context *sw_context,
103 struct vmw_resource *ctx);
104static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
105 struct vmw_sw_context *sw_context,
106 SVGAMobId *id,
107 struct vmw_dma_buffer **vmw_bo_p);
108static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
109 struct vmw_dma_buffer *vbo,
110 bool validate_as_mob,
111 uint32_t *p_val_node);
112
113
95/** 114/**
96 * vmw_resource_unreserve - unreserve resources previously reserved for 115 * vmw_resources_unreserve - unreserve resources previously reserved for
97 * command submission. 116 * command submission.
98 * 117 *
99 * @list_head: list of resources to unreserve. 118 * @sw_context: pointer to the software context
100 * @backoff: Whether command submission failed. 119 * @backoff: Whether command submission failed.
101 */ 120 */
102static void vmw_resource_list_unreserve(struct list_head *list, 121static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
103 bool backoff) 122 bool backoff)
104{ 123{
105 struct vmw_resource_val_node *val; 124 struct vmw_resource_val_node *val;
125 struct list_head *list = &sw_context->resource_list;
126
127 if (sw_context->dx_query_mob && !backoff)
128 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
129 sw_context->dx_query_mob);
106 130
107 list_for_each_entry(val, list, head) { 131 list_for_each_entry(val, list, head) {
108 struct vmw_resource *res = val->res; 132 struct vmw_resource *res = val->res;
109 struct vmw_dma_buffer *new_backup = 133 bool switch_backup =
110 backoff ? NULL : val->new_backup; 134 (backoff) ? false : val->switching_backup;
111 135
112 /* 136 /*
113 * Transfer staged context bindings to the 137 * Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
115 */ 139 */
116 if (unlikely(val->staged_bindings)) { 140 if (unlikely(val->staged_bindings)) {
117 if (!backoff) { 141 if (!backoff) {
118 vmw_context_binding_state_transfer 142 vmw_binding_state_commit
119 (val->res, val->staged_bindings); 143 (vmw_context_binding_state(val->res),
144 val->staged_bindings);
120 } 145 }
121 kfree(val->staged_bindings); 146
147 if (val->staged_bindings != sw_context->staged_bindings)
148 vmw_binding_state_free(val->staged_bindings);
149 else
150 sw_context->staged_bindings_inuse = false;
122 val->staged_bindings = NULL; 151 val->staged_bindings = NULL;
123 } 152 }
124 vmw_resource_unreserve(res, new_backup, 153 vmw_resource_unreserve(res, switch_backup, val->new_backup,
125 val->new_backup_offset); 154 val->new_backup_offset);
126 vmw_dmabuf_unreference(&val->new_backup); 155 vmw_dmabuf_unreference(&val->new_backup);
127 } 156 }
128} 157}
129 158
159/**
160 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
161 * added to the validate list.
162 *
163 * @dev_priv: Pointer to the device private.
164 * @sw_context: The validation context.
165 * @node: The validation node holding this context.
166 */
167static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
168 struct vmw_sw_context *sw_context,
169 struct vmw_resource_val_node *node)
170{
171 int ret;
172
173 ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
174 if (unlikely(ret != 0))
175 goto out_err;
176
177 if (!sw_context->staged_bindings) {
178 sw_context->staged_bindings =
179 vmw_binding_state_alloc(dev_priv);
180 if (IS_ERR(sw_context->staged_bindings)) {
181 DRM_ERROR("Failed to allocate context binding "
182 "information.\n");
183 ret = PTR_ERR(sw_context->staged_bindings);
184 sw_context->staged_bindings = NULL;
185 goto out_err;
186 }
187 }
188
189 if (sw_context->staged_bindings_inuse) {
190 node->staged_bindings = vmw_binding_state_alloc(dev_priv);
191 if (IS_ERR(node->staged_bindings)) {
192 DRM_ERROR("Failed to allocate context binding "
193 "information.\n");
194 ret = PTR_ERR(node->staged_bindings);
195 node->staged_bindings = NULL;
196 goto out_err;
197 }
198 } else {
199 node->staged_bindings = sw_context->staged_bindings;
200 sw_context->staged_bindings_inuse = true;
201 }
202
203 return 0;
204out_err:
205 return ret;
206}
130 207
131/** 208/**
132 * vmw_resource_val_add - Add a resource to the software context's 209 * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141 struct vmw_resource *res, 218 struct vmw_resource *res,
142 struct vmw_resource_val_node **p_node) 219 struct vmw_resource_val_node **p_node)
143{ 220{
221 struct vmw_private *dev_priv = res->dev_priv;
144 struct vmw_resource_val_node *node; 222 struct vmw_resource_val_node *node;
145 struct drm_hash_item *hash; 223 struct drm_hash_item *hash;
146 int ret; 224 int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
169 kfree(node); 247 kfree(node);
170 return ret; 248 return ret;
171 } 249 }
172 list_add_tail(&node->head, &sw_context->resource_list);
173 node->res = vmw_resource_reference(res); 250 node->res = vmw_resource_reference(res);
174 node->first_usage = true; 251 node->first_usage = true;
175
176 if (unlikely(p_node != NULL)) 252 if (unlikely(p_node != NULL))
177 *p_node = node; 253 *p_node = node;
178 254
179 return 0; 255 if (!dev_priv->has_mob) {
256 list_add_tail(&node->head, &sw_context->resource_list);
257 return 0;
258 }
259
260 switch (vmw_res_type(res)) {
261 case vmw_res_context:
262 case vmw_res_dx_context:
263 list_add(&node->head, &sw_context->ctx_resource_list);
264 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
265 break;
266 case vmw_res_cotable:
267 list_add_tail(&node->head, &sw_context->ctx_resource_list);
268 break;
269 default:
270 list_add_tail(&node->head, &sw_context->resource_list);
271 break;
272 }
273
274 return ret;
275}
276
277/**
278 * vmw_view_res_val_add - Add a view and the surface it's pointing to
279 * to the validation list
280 *
281 * @sw_context: The software context holding the validation list.
282 * @view: Pointer to the view resource.
283 *
284 * Returns 0 if success, negative error code otherwise.
285 */
286static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
287 struct vmw_resource *view)
288{
289 int ret;
290
291 /*
292 * First add the resource the view is pointing to, otherwise
293 * it may be swapped out when the view is validated.
294 */
295 ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
296 if (ret)
297 return ret;
298
299 return vmw_resource_val_add(sw_context, view, NULL);
300}
301
302/**
303 * vmw_view_id_val_add - Look up a view and add it and the surface it's
304 * pointing to, to the validation list.
305 *
306 * @sw_context: The software context holding the validation list.
307 * @view_type: The view type to look up.
308 * @id: view id of the view.
309 *
310 * The view is represented by a view id and the DX context it's created on,
311 * or scheduled for creation on. If there is no DX context set, the function
312 * will return -EINVAL. Otherwise returns 0 on success and a negative error code on failure.
313 */
314static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
315 enum vmw_view_type view_type, u32 id)
316{
317 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
318 struct vmw_resource *view;
319 int ret;
320
321 if (!ctx_node) {
322 DRM_ERROR("DX Context not set.\n");
323 return -EINVAL;
324 }
325
326 view = vmw_view_lookup(sw_context->man, view_type, id);
327 if (IS_ERR(view))
328 return PTR_ERR(view);
329
330 ret = vmw_view_res_val_add(sw_context, view);
331 vmw_resource_unreference(&view);
332
333 return ret;
180} 334}
181 335
182/** 336/**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
195 struct vmw_resource *ctx) 349 struct vmw_resource *ctx)
196{ 350{
197 struct list_head *binding_list; 351 struct list_head *binding_list;
198 struct vmw_ctx_binding *entry; 352 struct vmw_ctx_bindinfo *entry;
199 int ret = 0; 353 int ret = 0;
200 struct vmw_resource *res; 354 struct vmw_resource *res;
355 u32 i;
201 356
357 /* Add all cotables to the validation list. */
358 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
359 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
360 res = vmw_context_cotable(ctx, i);
361 if (IS_ERR(res))
362 continue;
363
364 ret = vmw_resource_val_add(sw_context, res, NULL);
365 vmw_resource_unreference(&res);
366 if (unlikely(ret != 0))
367 return ret;
368 }
369 }
370
371
372 /* Add all resources bound to the context to the validation list */
202 mutex_lock(&dev_priv->binding_mutex); 373 mutex_lock(&dev_priv->binding_mutex);
203 binding_list = vmw_context_binding_list(ctx); 374 binding_list = vmw_context_binding_list(ctx);
204 375
205 list_for_each_entry(entry, binding_list, ctx_list) { 376 list_for_each_entry(entry, binding_list, ctx_list) {
206 res = vmw_resource_reference_unless_doomed(entry->bi.res); 377 /* entry->res is not refcounted */
378 res = vmw_resource_reference_unless_doomed(entry->res);
207 if (unlikely(res == NULL)) 379 if (unlikely(res == NULL))
208 continue; 380 continue;
209 381
210 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); 382 if (vmw_res_type(entry->res) == vmw_res_view)
383 ret = vmw_view_res_val_add(sw_context, entry->res);
384 else
385 ret = vmw_resource_val_add(sw_context, entry->res,
386 NULL);
211 vmw_resource_unreference(&res); 387 vmw_resource_unreference(&res);
212 if (unlikely(ret != 0)) 388 if (unlikely(ret != 0))
213 break; 389 break;
214 } 390 }
215 391
392 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
393 struct vmw_dma_buffer *dx_query_mob;
394
395 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
396 if (dx_query_mob)
397 ret = vmw_bo_to_validate_list(sw_context,
398 dx_query_mob,
399 true, NULL);
400 }
401
216 mutex_unlock(&dev_priv->binding_mutex); 402 mutex_unlock(&dev_priv->binding_mutex);
217 return ret; 403 return ret;
218} 404}
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
308 * submission is reached. 494 * submission is reached.
309 */ 495 */
310static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 496static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311 struct ttm_buffer_object *bo, 497 struct vmw_dma_buffer *vbo,
312 bool validate_as_mob, 498 bool validate_as_mob,
313 uint32_t *p_val_node) 499 uint32_t *p_val_node)
314{ 500{
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
318 struct drm_hash_item *hash; 504 struct drm_hash_item *hash;
319 int ret; 505 int ret;
320 506
321 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, 507 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
322 &hash) == 0)) { 508 &hash) == 0)) {
323 vval_buf = container_of(hash, struct vmw_validate_buffer, 509 vval_buf = container_of(hash, struct vmw_validate_buffer,
324 hash); 510 hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
336 return -EINVAL; 522 return -EINVAL;
337 } 523 }
338 vval_buf = &sw_context->val_bufs[val_node]; 524 vval_buf = &sw_context->val_bufs[val_node];
339 vval_buf->hash.key = (unsigned long) bo; 525 vval_buf->hash.key = (unsigned long) vbo;
340 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); 526 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341 if (unlikely(ret != 0)) { 527 if (unlikely(ret != 0)) {
342 DRM_ERROR("Failed to initialize a buffer validation " 528 DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
345 } 531 }
346 ++sw_context->cur_val_buf; 532 ++sw_context->cur_val_buf;
347 val_buf = &vval_buf->base; 533 val_buf = &vval_buf->base;
348 val_buf->bo = ttm_bo_reference(bo); 534 val_buf->bo = ttm_bo_reference(&vbo->base);
349 val_buf->shared = false; 535 val_buf->shared = false;
350 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 536 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351 vval_buf->validate_as_mob = validate_as_mob; 537 vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
370static int vmw_resources_reserve(struct vmw_sw_context *sw_context) 556static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
371{ 557{
372 struct vmw_resource_val_node *val; 558 struct vmw_resource_val_node *val;
373 int ret; 559 int ret = 0;
374 560
375 list_for_each_entry(val, &sw_context->resource_list, head) { 561 list_for_each_entry(val, &sw_context->resource_list, head) {
376 struct vmw_resource *res = val->res; 562 struct vmw_resource *res = val->res;
377 563
378 ret = vmw_resource_reserve(res, val->no_buffer_needed); 564 ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
379 if (unlikely(ret != 0)) 565 if (unlikely(ret != 0))
380 return ret; 566 return ret;
381 567
382 if (res->backup) { 568 if (res->backup) {
383 struct ttm_buffer_object *bo = &res->backup->base; 569 struct vmw_dma_buffer *vbo = res->backup;
384 570
385 ret = vmw_bo_to_validate_list 571 ret = vmw_bo_to_validate_list
386 (sw_context, bo, 572 (sw_context, vbo,
387 vmw_resource_needs_backup(res), NULL); 573 vmw_resource_needs_backup(res), NULL);
388 574
389 if (unlikely(ret != 0)) 575 if (unlikely(ret != 0))
390 return ret; 576 return ret;
391 } 577 }
392 } 578 }
393 return 0; 579
580 if (sw_context->dx_query_mob) {
581 struct vmw_dma_buffer *expected_dx_query_mob;
582
583 expected_dx_query_mob =
584 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
585 if (expected_dx_query_mob &&
586 expected_dx_query_mob != sw_context->dx_query_mob) {
587 ret = -EINVAL;
588 }
589 }
590
591 return ret;
394} 592}
395 593
396/** 594/**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
409 607
410 list_for_each_entry(val, &sw_context->resource_list, head) { 608 list_for_each_entry(val, &sw_context->resource_list, head) {
411 struct vmw_resource *res = val->res; 609 struct vmw_resource *res = val->res;
610 struct vmw_dma_buffer *backup = res->backup;
412 611
413 ret = vmw_resource_validate(res); 612 ret = vmw_resource_validate(res);
414 if (unlikely(ret != 0)) { 613 if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
416 DRM_ERROR("Failed to validate resource.\n"); 615 DRM_ERROR("Failed to validate resource.\n");
417 return ret; 616 return ret;
418 } 617 }
618
619 /* Check if the resource switched backup buffer */
620 if (backup && res->backup && (backup != res->backup)) {
621 struct vmw_dma_buffer *vbo = res->backup;
622
623 ret = vmw_bo_to_validate_list
624 (sw_context, vbo,
625 vmw_resource_needs_backup(res), NULL);
626 if (ret) {
627 ttm_bo_unreserve(&vbo->base);
628 return ret;
629 }
630 }
419 } 631 }
420 return 0; 632 return 0;
421} 633}
422 634
423
424/** 635/**
425 * vmw_cmd_res_reloc_add - Add a resource to a software context's 636 * vmw_cmd_res_reloc_add - Add a resource to a software context's
426 * relocation- and validation lists. 637 * relocation- and validation lists.
427 * 638 *
428 * @dev_priv: Pointer to a struct vmw_private identifying the device. 639 * @dev_priv: Pointer to a struct vmw_private identifying the device.
429 * @sw_context: Pointer to the software context. 640 * @sw_context: Pointer to the software context.
430 * @res_type: Resource type.
431 * @id_loc: Pointer to where the id that needs translation is located. 641 * @id_loc: Pointer to where the id that needs translation is located.
432 * @res: Valid pointer to a struct vmw_resource. 642 * @res: Valid pointer to a struct vmw_resource.
433 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node 643 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
435 */ 645 */
436static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, 646static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
437 struct vmw_sw_context *sw_context, 647 struct vmw_sw_context *sw_context,
438 enum vmw_res_type res_type,
439 uint32_t *id_loc, 648 uint32_t *id_loc,
440 struct vmw_resource *res, 649 struct vmw_resource *res,
441 struct vmw_resource_val_node **p_val) 650 struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
454 if (unlikely(ret != 0)) 663 if (unlikely(ret != 0))
455 return ret; 664 return ret;
456 665
457 if (res_type == vmw_res_context && dev_priv->has_mob &&
458 node->first_usage) {
459
460 /*
461 * Put contexts first on the list to be able to exit
462 * list traversal for contexts early.
463 */
464 list_del(&node->head);
465 list_add(&node->head, &sw_context->resource_list);
466
467 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
468 if (unlikely(ret != 0))
469 return ret;
470 node->staged_bindings =
471 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
472 if (node->staged_bindings == NULL) {
473 DRM_ERROR("Failed to allocate context binding "
474 "information.\n");
475 return -ENOMEM;
476 }
477 INIT_LIST_HEAD(&node->staged_bindings->list);
478 }
479
480 if (p_val) 666 if (p_val)
481 *p_val = node; 667 *p_val = node;
482 668
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
554 rcache->res = res; 740 rcache->res = res;
555 rcache->handle = *id_loc; 741 rcache->handle = *id_loc;
556 742
557 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, 743 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
558 res, &node); 744 res, &node);
559 if (unlikely(ret != 0)) 745 if (unlikely(ret != 0))
560 goto out_no_reloc; 746 goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
573} 759}
574 760
575/** 761/**
762 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
763 *
764 * @ctx_res: context the query belongs to
765 *
766 * This function assumes binding_mutex is held.
767 */
768static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
769{
770 struct vmw_private *dev_priv = ctx_res->dev_priv;
771 struct vmw_dma_buffer *dx_query_mob;
772 struct {
773 SVGA3dCmdHeader header;
774 SVGA3dCmdDXBindAllQuery body;
775 } *cmd;
776
777
778 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
779
780 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
781 return 0;
782
783 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
784
785 if (cmd == NULL) {
786 DRM_ERROR("Failed to rebind queries.\n");
787 return -ENOMEM;
788 }
789
790 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
791 cmd->header.size = sizeof(cmd->body);
792 cmd->body.cid = ctx_res->id;
793 cmd->body.mobid = dx_query_mob->base.mem.start;
794 vmw_fifo_commit(dev_priv, sizeof(*cmd));
795
796 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
797
798 return 0;
799}
800
801/**
576 * vmw_rebind_contexts - Rebind all resources previously bound to 802 * vmw_rebind_contexts - Rebind all resources previously bound to
577 * referenced contexts. 803 * referenced contexts.
578 * 804 *
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
589 if (unlikely(!val->staged_bindings)) 815 if (unlikely(!val->staged_bindings))
590 break; 816 break;
591 817
592 ret = vmw_context_rebind_all(val->res); 818 ret = vmw_binding_rebind_all
819 (vmw_context_binding_state(val->res));
593 if (unlikely(ret != 0)) { 820 if (unlikely(ret != 0)) {
594 if (ret != -ERESTARTSYS) 821 if (ret != -ERESTARTSYS)
595 DRM_ERROR("Failed to rebind context.\n"); 822 DRM_ERROR("Failed to rebind context.\n");
596 return ret; 823 return ret;
597 } 824 }
825
826 ret = vmw_rebind_all_dx_query(val->res);
827 if (ret != 0)
828 return ret;
829 }
830
831 return 0;
832}
833
834/**
835 * vmw_view_bindings_add - Add an array of view bindings to a context
836 * binding state tracker.
837 *
838 * @sw_context: The execbuf state used for this command.
839 * @view_type: View type for the bindings.
840 * @binding_type: Binding type for the bindings.
841 * @shader_slot: The shader slot to use for the bindings.
842 * @view_ids: Array of view ids to be bound.
843 * @num_views: Number of view ids in @view_ids.
844 * @first_slot: The binding slot to be used for the first view id in @view_ids.
845 */
846static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
847 enum vmw_view_type view_type,
848 enum vmw_ctx_binding_type binding_type,
849 uint32 shader_slot,
850 uint32 view_ids[], u32 num_views,
851 u32 first_slot)
852{
853 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
854 struct vmw_cmdbuf_res_manager *man;
855 u32 i;
856 int ret;
857
858 if (!ctx_node) {
859 DRM_ERROR("DX Context not set.\n");
860 return -EINVAL;
861 }
862
863 man = sw_context->man;
864 for (i = 0; i < num_views; ++i) {
865 struct vmw_ctx_bindinfo_view binding;
866 struct vmw_resource *view = NULL;
867
868 if (view_ids[i] != SVGA3D_INVALID_ID) {
869 view = vmw_view_lookup(man, view_type, view_ids[i]);
870 if (IS_ERR(view)) {
871 DRM_ERROR("View not found.\n");
872 return PTR_ERR(view);
873 }
874
875 ret = vmw_view_res_val_add(sw_context, view);
876 if (ret) {
877 DRM_ERROR("Could not add view to "
878 "validation list.\n");
879 vmw_resource_unreference(&view);
880 return ret;
881 }
882 }
883 binding.bi.ctx = ctx_node->res;
884 binding.bi.res = view;
885 binding.bi.bt = binding_type;
886 binding.shader_slot = shader_slot;
887 binding.slot = first_slot + i;
888 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
889 shader_slot, binding.slot);
890 if (view)
891 vmw_resource_unreference(&view);
598 } 892 }
599 893
600 return 0; 894 return 0;
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
638 932
639 cmd = container_of(header, struct vmw_sid_cmd, header); 933 cmd = container_of(header, struct vmw_sid_cmd, header);
640 934
935 if (cmd->body.type >= SVGA3D_RT_MAX) {
936 DRM_ERROR("Illegal render target type %u.\n",
937 (unsigned) cmd->body.type);
938 return -EINVAL;
939 }
940
641 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 941 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
642 user_context_converter, &cmd->body.cid, 942 user_context_converter, &cmd->body.cid,
643 &ctx_node); 943 &ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
651 return ret; 951 return ret;
652 952
653 if (dev_priv->has_mob) { 953 if (dev_priv->has_mob) {
654 struct vmw_ctx_bindinfo bi; 954 struct vmw_ctx_bindinfo_view binding;
655 955
656 bi.ctx = ctx_node->res; 956 binding.bi.ctx = ctx_node->res;
657 bi.res = res_node ? res_node->res : NULL; 957 binding.bi.res = res_node ? res_node->res : NULL;
658 bi.bt = vmw_ctx_binding_rt; 958 binding.bi.bt = vmw_ctx_binding_rt;
659 bi.i1.rt_type = cmd->body.type; 959 binding.slot = cmd->body.type;
660 return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 960 vmw_binding_add(ctx_node->staged_bindings,
961 &binding.bi, 0, binding.slot);
661 } 962 }
662 963
663 return 0; 964 return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
674 int ret; 975 int ret;
675 976
676 cmd = container_of(header, struct vmw_sid_cmd, header); 977 cmd = container_of(header, struct vmw_sid_cmd, header);
978
677 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 979 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
678 user_surface_converter, 980 user_surface_converter,
679 &cmd->body.src.sid, NULL); 981 &cmd->body.src.sid, NULL);
680 if (unlikely(ret != 0)) 982 if (ret)
681 return ret; 983 return ret;
984
682 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 985 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
683 user_surface_converter, 986 user_surface_converter,
684 &cmd->body.dest.sid, NULL); 987 &cmd->body.dest.sid, NULL);
685} 988}
686 989
990static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
991 struct vmw_sw_context *sw_context,
992 SVGA3dCmdHeader *header)
993{
994 struct {
995 SVGA3dCmdHeader header;
996 SVGA3dCmdDXBufferCopy body;
997 } *cmd;
998 int ret;
999
1000 cmd = container_of(header, typeof(*cmd), header);
1001 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1002 user_surface_converter,
1003 &cmd->body.src, NULL);
1004 if (ret != 0)
1005 return ret;
1006
1007 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1008 user_surface_converter,
1009 &cmd->body.dest, NULL);
1010}
1011
1012static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1013 struct vmw_sw_context *sw_context,
1014 SVGA3dCmdHeader *header)
1015{
1016 struct {
1017 SVGA3dCmdHeader header;
1018 SVGA3dCmdDXPredCopyRegion body;
1019 } *cmd;
1020 int ret;
1021
1022 cmd = container_of(header, typeof(*cmd), header);
1023 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1024 user_surface_converter,
1025 &cmd->body.srcSid, NULL);
1026 if (ret != 0)
1027 return ret;
1028
1029 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1030 user_surface_converter,
1031 &cmd->body.dstSid, NULL);
1032}
1033
687static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, 1034static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
688 struct vmw_sw_context *sw_context, 1035 struct vmw_sw_context *sw_context,
689 SVGA3dCmdHeader *header) 1036 SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
752 * command batch. 1099 * command batch.
753 */ 1100 */
754static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, 1101static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
755 struct ttm_buffer_object *new_query_bo, 1102 struct vmw_dma_buffer *new_query_bo,
756 struct vmw_sw_context *sw_context) 1103 struct vmw_sw_context *sw_context)
757{ 1104{
758 struct vmw_res_cache_entry *ctx_entry = 1105 struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
764 1111
765 if (unlikely(new_query_bo != sw_context->cur_query_bo)) { 1112 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
766 1113
767 if (unlikely(new_query_bo->num_pages > 4)) { 1114 if (unlikely(new_query_bo->base.num_pages > 4)) {
768 DRM_ERROR("Query buffer too large.\n"); 1115 DRM_ERROR("Query buffer too large.\n");
769 return -EINVAL; 1116 return -EINVAL;
770 } 1117 }
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
833 1180
834 if (dev_priv->pinned_bo != sw_context->cur_query_bo) { 1181 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
835 if (dev_priv->pinned_bo) { 1182 if (dev_priv->pinned_bo) {
836 vmw_bo_pin(dev_priv->pinned_bo, false); 1183 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
837 ttm_bo_unref(&dev_priv->pinned_bo); 1184 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
838 } 1185 }
839 1186
840 if (!sw_context->needs_post_query_barrier) { 1187 if (!sw_context->needs_post_query_barrier) {
841 vmw_bo_pin(sw_context->cur_query_bo, true); 1188 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
842 1189
843 /* 1190 /*
844 * We pin also the dummy_query_bo buffer so that we 1191 * We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
846 * dummy queries in context destroy paths. 1193 * dummy queries in context destroy paths.
847 */ 1194 */
848 1195
849 vmw_bo_pin(dev_priv->dummy_query_bo, true); 1196 if (!dev_priv->dummy_query_bo_pinned) {
850 dev_priv->dummy_query_bo_pinned = true; 1197 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1198 true);
1199 dev_priv->dummy_query_bo_pinned = true;
1200 }
851 1201
852 BUG_ON(sw_context->last_query_ctx == NULL); 1202 BUG_ON(sw_context->last_query_ctx == NULL);
853 dev_priv->query_cid = sw_context->last_query_ctx->id; 1203 dev_priv->query_cid = sw_context->last_query_ctx->id;
854 dev_priv->query_cid_valid = true; 1204 dev_priv->query_cid_valid = true;
855 dev_priv->pinned_bo = 1205 dev_priv->pinned_bo =
856 ttm_bo_reference(sw_context->cur_query_bo); 1206 vmw_dmabuf_reference(sw_context->cur_query_bo);
857 } 1207 }
858 } 1208 }
859} 1209}
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
882 struct vmw_dma_buffer **vmw_bo_p) 1232 struct vmw_dma_buffer **vmw_bo_p)
883{ 1233{
884 struct vmw_dma_buffer *vmw_bo = NULL; 1234 struct vmw_dma_buffer *vmw_bo = NULL;
885 struct ttm_buffer_object *bo;
886 uint32_t handle = *id; 1235 uint32_t handle = *id;
887 struct vmw_relocation *reloc; 1236 struct vmw_relocation *reloc;
888 int ret; 1237 int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
893 ret = -EINVAL; 1242 ret = -EINVAL;
894 goto out_no_reloc; 1243 goto out_no_reloc;
895 } 1244 }
896 bo = &vmw_bo->base;
897 1245
898 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1246 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
899 DRM_ERROR("Max number relocations per submission" 1247 DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
906 reloc->mob_loc = id; 1254 reloc->mob_loc = id;
907 reloc->location = NULL; 1255 reloc->location = NULL;
908 1256
909 ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); 1257 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
910 if (unlikely(ret != 0)) 1258 if (unlikely(ret != 0))
911 goto out_no_reloc; 1259 goto out_no_reloc;
912 1260
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
944 struct vmw_dma_buffer **vmw_bo_p) 1292 struct vmw_dma_buffer **vmw_bo_p)
945{ 1293{
946 struct vmw_dma_buffer *vmw_bo = NULL; 1294 struct vmw_dma_buffer *vmw_bo = NULL;
947 struct ttm_buffer_object *bo;
948 uint32_t handle = ptr->gmrId; 1295 uint32_t handle = ptr->gmrId;
949 struct vmw_relocation *reloc; 1296 struct vmw_relocation *reloc;
950 int ret; 1297 int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
955 ret = -EINVAL; 1302 ret = -EINVAL;
956 goto out_no_reloc; 1303 goto out_no_reloc;
957 } 1304 }
958 bo = &vmw_bo->base;
959 1305
960 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 1306 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
961 DRM_ERROR("Max number relocations per submission" 1307 DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
967 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 1313 reloc = &sw_context->relocs[sw_context->cur_reloc++];
968 reloc->location = ptr; 1314 reloc->location = ptr;
969 1315
970 ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); 1316 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
971 if (unlikely(ret != 0)) 1317 if (unlikely(ret != 0))
972 goto out_no_reloc; 1318 goto out_no_reloc;
973 1319
@@ -980,6 +1326,98 @@ out_no_reloc:
980 return ret; 1326 return ret;
981} 1327}
982 1328
1329
1330
1331/**
1332 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1333 *
1334 * @dev_priv: Pointer to a device private struct.
1335 * @sw_context: The software context used for this command submission.
1336 * @header: Pointer to the command header in the command stream.
1337 *
1338 * This function adds the new query into the query COTABLE.
1339 */
1340static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1341 struct vmw_sw_context *sw_context,
1342 SVGA3dCmdHeader *header)
1343{
1344 struct vmw_dx_define_query_cmd {
1345 SVGA3dCmdHeader header;
1346 SVGA3dCmdDXDefineQuery q;
1347 } *cmd;
1348
1349 int ret;
1350 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1351 struct vmw_resource *cotable_res;
1352
1353
1354 if (ctx_node == NULL) {
1355 DRM_ERROR("DX Context not set for query.\n");
1356 return -EINVAL;
1357 }
1358
1359 cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1360
1361 if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
1362 cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1363 return -EINVAL;
1364
1365 cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1366 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1367 vmw_resource_unreference(&cotable_res);
1368
1369 return ret;
1370}
1371
1372
1373
1374/**
1375 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1376 *
1377 * @dev_priv: Pointer to a device private struct.
1378 * @sw_context: The software context used for this command submission.
1379 * @header: Pointer to the command header in the command stream.
1380 *
1381 * The query bind operation will eventually associate the query ID
1382 * with its backing MOB. In this function, we take the user mode
1383 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1384 * kernel mode equivalent.
1385 */
1386static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1387 struct vmw_sw_context *sw_context,
1388 SVGA3dCmdHeader *header)
1389{
1390 struct vmw_dx_bind_query_cmd {
1391 SVGA3dCmdHeader header;
1392 SVGA3dCmdDXBindQuery q;
1393 } *cmd;
1394
1395 struct vmw_dma_buffer *vmw_bo;
1396 int ret;
1397
1398
1399 cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1400
1401 /*
1402 * Look up the buffer pointed to by q.mobid, put it on the relocation
1403 * list so its kernel mode MOB ID can be filled in later
1404 */
1405 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1406 &vmw_bo);
1407
1408 if (ret != 0)
1409 return ret;
1410
1411 sw_context->dx_query_mob = vmw_bo;
1412 sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1413
1414 vmw_dmabuf_unreference(&vmw_bo);
1415
1416 return ret;
1417}
1418
1419
1420
983/** 1421/**
984 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. 1422 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
985 * 1423 *
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1074 if (unlikely(ret != 0)) 1512 if (unlikely(ret != 0))
1075 return ret; 1513 return ret;
1076 1514
1077 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); 1515 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1078 1516
1079 vmw_dmabuf_unreference(&vmw_bo); 1517 vmw_dmabuf_unreference(&vmw_bo);
1080 return ret; 1518 return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1128 if (unlikely(ret != 0)) 1566 if (unlikely(ret != 0))
1129 return ret; 1567 return ret;
1130 1568
1131 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); 1569 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1132 1570
1133 vmw_dmabuf_unreference(&vmw_bo); 1571 vmw_dmabuf_unreference(&vmw_bo);
1134 return ret; 1572 return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1363 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 1801 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1364 continue; 1802 continue;
1365 1803
1804 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1805 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1806 (unsigned) cur_state->stage);
1807 return -EINVAL;
1808 }
1809
1366 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1810 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1367 user_surface_converter, 1811 user_surface_converter,
1368 &cur_state->value, &res_node); 1812 &cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1370 return ret; 1814 return ret;
1371 1815
1372 if (dev_priv->has_mob) { 1816 if (dev_priv->has_mob) {
1373 struct vmw_ctx_bindinfo bi; 1817 struct vmw_ctx_bindinfo_tex binding;
1374 1818
1375 bi.ctx = ctx_node->res; 1819 binding.bi.ctx = ctx_node->res;
1376 bi.res = res_node ? res_node->res : NULL; 1820 binding.bi.res = res_node ? res_node->res : NULL;
1377 bi.bt = vmw_ctx_binding_tex; 1821 binding.bi.bt = vmw_ctx_binding_tex;
1378 bi.i1.texture_stage = cur_state->stage; 1822 binding.texture_stage = cur_state->stage;
1379 vmw_context_binding_add(ctx_node->staged_bindings, 1823 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1380 &bi); 1824 0, binding.texture_stage);
1381 } 1825 }
1382 } 1826 }
1383 1827
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1407 return ret; 1851 return ret;
1408} 1852}
1409 1853
1854
1855/**
1856 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1857 * switching
1858 *
1859 * @dev_priv: Pointer to a device private struct.
1860 * @sw_context: The software context being used for this batch.
1861 * @val_node: The validation node representing the resource.
1862 * @buf_id: Pointer to the user-space backup buffer handle in the command
1863 * stream.
1864 * @backup_offset: Offset of backup into MOB.
1865 *
1866 * This function prepares for registering a switch of backup buffers
1867 * in the resource metadata just prior to unreserving; the actual switch
1868 * takes place in vmw_resource_unreserve() when the resources are unreserved.
1869 */
1870static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1871 struct vmw_sw_context *sw_context,
1872 struct vmw_resource_val_node *val_node,
1873 uint32_t *buf_id,
1874 unsigned long backup_offset)
1875{
1876 struct vmw_dma_buffer *dma_buf;
1877 int ret;
1878
1879 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1880 if (ret)
1881 return ret;
1882
1883 val_node->switching_backup = true;
1884 if (val_node->first_usage)
1885 val_node->no_buffer_needed = true;
1886
1887 vmw_dmabuf_unreference(&val_node->new_backup);
1888 val_node->new_backup = dma_buf;
1889 val_node->new_backup_offset = backup_offset;
1890
1891 return 0;
1892}
1893
1894
1410/** 1895/**
1411 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching 1896 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1412 * 1897 *
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1420 * @backup_offset: Offset of backup into MOB. 1905 * @backup_offset: Offset of backup into MOB.
1421 * 1906 *
1422 * This function prepares for registering a switch of backup buffers 1907 * This function prepares for registering a switch of backup buffers
1423 * in the resource metadata just prior to unreserving. 1908 * in the resource metadata just prior to unreserving. It's basically a wrapper
1909 * around vmw_cmd_res_switch_backup with a different interface.
1424 */ 1910 */
1425static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, 1911static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1426 struct vmw_sw_context *sw_context, 1912 struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1431 uint32_t *buf_id, 1917 uint32_t *buf_id,
1432 unsigned long backup_offset) 1918 unsigned long backup_offset)
1433{ 1919{
1434 int ret;
1435 struct vmw_dma_buffer *dma_buf;
1436 struct vmw_resource_val_node *val_node; 1920 struct vmw_resource_val_node *val_node;
1921 int ret;
1437 1922
1438 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1923 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1439 converter, res_id, &val_node); 1924 converter, res_id, &val_node);
1440 if (unlikely(ret != 0)) 1925 if (ret)
1441 return ret;
1442
1443 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1444 if (unlikely(ret != 0))
1445 return ret; 1926 return ret;
1446 1927
1447 if (val_node->first_usage) 1928 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1448 val_node->no_buffer_needed = true; 1929 buf_id, backup_offset);
1449
1450 vmw_dmabuf_unreference(&val_node->new_backup);
1451 val_node->new_backup = dma_buf;
1452 val_node->new_backup_offset = backup_offset;
1453
1454 return 0;
1455} 1930}
1456 1931
1457/** 1932/**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1703 if (unlikely(!dev_priv->has_mob)) 2178 if (unlikely(!dev_priv->has_mob))
1704 return 0; 2179 return 0;
1705 2180
1706 ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), 2181 ret = vmw_shader_remove(vmw_context_res_man(val->res),
1707 cmd->body.shid, 2182 cmd->body.shid,
1708 cmd->body.type, 2183 cmd->body.type,
1709 &sw_context->staged_cmd_res); 2184 &sw_context->staged_cmd_res);
1710 if (unlikely(ret != 0)) 2185 if (unlikely(ret != 0))
1711 return ret; 2186 return ret;
1712 2187
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1734 SVGA3dCmdSetShader body; 2209 SVGA3dCmdSetShader body;
1735 } *cmd; 2210 } *cmd;
1736 struct vmw_resource_val_node *ctx_node, *res_node = NULL; 2211 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1737 struct vmw_ctx_bindinfo bi; 2212 struct vmw_ctx_bindinfo_shader binding;
1738 struct vmw_resource *res = NULL; 2213 struct vmw_resource *res = NULL;
1739 int ret; 2214 int ret;
1740 2215
1741 cmd = container_of(header, struct vmw_set_shader_cmd, 2216 cmd = container_of(header, struct vmw_set_shader_cmd,
1742 header); 2217 header);
1743 2218
2219 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2220 DRM_ERROR("Illegal shader type %u.\n",
2221 (unsigned) cmd->body.type);
2222 return -EINVAL;
2223 }
2224
1744 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2225 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1745 user_context_converter, &cmd->body.cid, 2226 user_context_converter, &cmd->body.cid,
1746 &ctx_node); 2227 &ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1751 return 0; 2232 return 0;
1752 2233
1753 if (cmd->body.shid != SVGA3D_INVALID_ID) { 2234 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1754 res = vmw_compat_shader_lookup 2235 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
1755 (vmw_context_res_man(ctx_node->res), 2236 cmd->body.shid,
1756 cmd->body.shid, 2237 cmd->body.type);
1757 cmd->body.type);
1758 2238
1759 if (!IS_ERR(res)) { 2239 if (!IS_ERR(res)) {
1760 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, 2240 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1761 vmw_res_shader,
1762 &cmd->body.shid, res, 2241 &cmd->body.shid, res,
1763 &res_node); 2242 &res_node);
1764 vmw_resource_unreference(&res); 2243 vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1776 return ret; 2255 return ret;
1777 } 2256 }
1778 2257
1779 bi.ctx = ctx_node->res; 2258 binding.bi.ctx = ctx_node->res;
1780 bi.res = res_node ? res_node->res : NULL; 2259 binding.bi.res = res_node ? res_node->res : NULL;
1781 bi.bt = vmw_ctx_binding_shader; 2260 binding.bi.bt = vmw_ctx_binding_shader;
1782 bi.i1.shader_type = cmd->body.type; 2261 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
1783 return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 2262 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2263 binding.shader_slot, 0);
2264 return 0;
1784} 2265}
1785 2266
1786/** 2267/**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1842 cmd->body.offsetInBytes); 2323 cmd->body.offsetInBytes);
1843} 2324}
1844 2325
2326/**
2327 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2328 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2329 *
2330 * @dev_priv: Pointer to a device private struct.
2331 * @sw_context: The software context being used for this batch.
2332 * @header: Pointer to the command header in the command stream.
2333 */
2334static int
2335vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2336 struct vmw_sw_context *sw_context,
2337 SVGA3dCmdHeader *header)
2338{
2339 struct {
2340 SVGA3dCmdHeader header;
2341 SVGA3dCmdDXSetSingleConstantBuffer body;
2342 } *cmd;
2343 struct vmw_resource_val_node *res_node = NULL;
2344 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2345 struct vmw_ctx_bindinfo_cb binding;
2346 int ret;
2347
2348 if (unlikely(ctx_node == NULL)) {
2349 DRM_ERROR("DX Context not set.\n");
2350 return -EINVAL;
2351 }
2352
2353 cmd = container_of(header, typeof(*cmd), header);
2354 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2355 user_surface_converter,
2356 &cmd->body.sid, &res_node);
2357 if (unlikely(ret != 0))
2358 return ret;
2359
2360 binding.bi.ctx = ctx_node->res;
2361 binding.bi.res = res_node ? res_node->res : NULL;
2362 binding.bi.bt = vmw_ctx_binding_cb;
2363 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2364 binding.offset = cmd->body.offsetInBytes;
2365 binding.size = cmd->body.sizeInBytes;
2366 binding.slot = cmd->body.slot;
2367
2368 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2369 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2370 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2371 (unsigned) cmd->body.type,
2372 (unsigned) binding.slot);
2373 return -EINVAL;
2374 }
2375
2376 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2377 binding.shader_slot, binding.slot);
2378
2379 return 0;
2380}
2381
2382/**
2383 * vmw_cmd_dx_set_shader_res - Validate an
2384 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2385 *
2386 * @dev_priv: Pointer to a device private struct.
2387 * @sw_context: The software context being used for this batch.
2388 * @header: Pointer to the command header in the command stream.
2389 */
2390static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2391 struct vmw_sw_context *sw_context,
2392 SVGA3dCmdHeader *header)
2393{
2394 struct {
2395 SVGA3dCmdHeader header;
2396 SVGA3dCmdDXSetShaderResources body;
2397 } *cmd = container_of(header, typeof(*cmd), header);
2398 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2399 sizeof(SVGA3dShaderResourceViewId);
2400
2401 if ((u64) cmd->body.startView + (u64) num_sr_view >
2402 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2403 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2404 DRM_ERROR("Invalid shader binding.\n");
2405 return -EINVAL;
2406 }
2407
2408 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2409 vmw_ctx_binding_sr,
2410 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2411 (void *) &cmd[1], num_sr_view,
2412 cmd->body.startView);
2413}
2414
2415/**
2416 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2417 * command
2418 *
2419 * @dev_priv: Pointer to a device private struct.
2420 * @sw_context: The software context being used for this batch.
2421 * @header: Pointer to the command header in the command stream.
2422 */
2423static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2424 struct vmw_sw_context *sw_context,
2425 SVGA3dCmdHeader *header)
2426{
2427 struct {
2428 SVGA3dCmdHeader header;
2429 SVGA3dCmdDXSetShader body;
2430 } *cmd;
2431 struct vmw_resource *res = NULL;
2432 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2433 struct vmw_ctx_bindinfo_shader binding;
2434 int ret = 0;
2435
2436 if (unlikely(ctx_node == NULL)) {
2437 DRM_ERROR("DX Context not set.\n");
2438 return -EINVAL;
2439 }
2440
2441 cmd = container_of(header, typeof(*cmd), header);
2442
2443 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2444 DRM_ERROR("Illegal shader type %u.\n",
2445 (unsigned) cmd->body.type);
2446 return -EINVAL;
2447 }
2448
2449 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2450 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2451 if (IS_ERR(res)) {
2452 DRM_ERROR("Could not find shader for binding.\n");
2453 return PTR_ERR(res);
2454 }
2455
2456 ret = vmw_resource_val_add(sw_context, res, NULL);
2457 if (ret)
2458 goto out_unref;
2459 }
2460
2461 binding.bi.ctx = ctx_node->res;
2462 binding.bi.res = res;
2463 binding.bi.bt = vmw_ctx_binding_dx_shader;
2464 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2465
2466 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2467 binding.shader_slot, 0);
2468out_unref:
2469 if (res)
2470 vmw_resource_unreference(&res);
2471
2472 return ret;
2473}
2474
2475/**
2476 * vmw_cmd_dx_set_vertex_buffers - Validates an
2477 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2478 *
2479 * @dev_priv: Pointer to a device private struct.
2480 * @sw_context: The software context being used for this batch.
2481 * @header: Pointer to the command header in the command stream.
2482 */
2483static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2484 struct vmw_sw_context *sw_context,
2485 SVGA3dCmdHeader *header)
2486{
2487 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488 struct vmw_ctx_bindinfo_vb binding;
2489 struct vmw_resource_val_node *res_node;
2490 struct {
2491 SVGA3dCmdHeader header;
2492 SVGA3dCmdDXSetVertexBuffers body;
2493 SVGA3dVertexBuffer buf[];
2494 } *cmd;
2495 int i, ret, num;
2496
2497 if (unlikely(ctx_node == NULL)) {
2498 DRM_ERROR("DX Context not set.\n");
2499 return -EINVAL;
2500 }
2501
2502 cmd = container_of(header, typeof(*cmd), header);
2503 num = (cmd->header.size - sizeof(cmd->body)) /
2504 sizeof(SVGA3dVertexBuffer);
2505 if ((u64)num + (u64)cmd->body.startBuffer >
2506 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2507 DRM_ERROR("Invalid number of vertex buffers.\n");
2508 return -EINVAL;
2509 }
2510
2511 for (i = 0; i < num; i++) {
2512 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2513 user_surface_converter,
2514 &cmd->buf[i].sid, &res_node);
2515 if (unlikely(ret != 0))
2516 return ret;
2517
2518 binding.bi.ctx = ctx_node->res;
2519 binding.bi.bt = vmw_ctx_binding_vb;
2520 binding.bi.res = ((res_node) ? res_node->res : NULL);
2521 binding.offset = cmd->buf[i].offset;
2522 binding.stride = cmd->buf[i].stride;
2523 binding.slot = i + cmd->body.startBuffer;
2524
2525 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2526 0, binding.slot);
2527 }
2528
2529 return 0;
2530}
2531
2532/**
2533 * vmw_cmd_dx_set_index_buffer - Validate an
2534 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2535 *
2536 * @dev_priv: Pointer to a device private struct.
2537 * @sw_context: The software context being used for this batch.
2538 * @header: Pointer to the command header in the command stream.
2539 */
2540static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2541 struct vmw_sw_context *sw_context,
2542 SVGA3dCmdHeader *header)
2543{
2544 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2545 struct vmw_ctx_bindinfo_ib binding;
2546 struct vmw_resource_val_node *res_node;
2547 struct {
2548 SVGA3dCmdHeader header;
2549 SVGA3dCmdDXSetIndexBuffer body;
2550 } *cmd;
2551 int ret;
2552
2553 if (unlikely(ctx_node == NULL)) {
2554 DRM_ERROR("DX Context not set.\n");
2555 return -EINVAL;
2556 }
2557
2558 cmd = container_of(header, typeof(*cmd), header);
2559 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2560 user_surface_converter,
2561 &cmd->body.sid, &res_node);
2562 if (unlikely(ret != 0))
2563 return ret;
2564
2565 binding.bi.ctx = ctx_node->res;
2566 binding.bi.res = ((res_node) ? res_node->res : NULL);
2567 binding.bi.bt = vmw_ctx_binding_ib;
2568 binding.offset = cmd->body.offset;
2569 binding.format = cmd->body.format;
2570
2571 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2572
2573 return 0;
2574}
2575
2576/**
2577 * vmw_cmd_dx_set_rendertarget - Validate an
2578 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2579 *
2580 * @dev_priv: Pointer to a device private struct.
2581 * @sw_context: The software context being used for this batch.
2582 * @header: Pointer to the command header in the command stream.
2583 */
2584static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2585 struct vmw_sw_context *sw_context,
2586 SVGA3dCmdHeader *header)
2587{
2588 struct {
2589 SVGA3dCmdHeader header;
2590 SVGA3dCmdDXSetRenderTargets body;
2591 } *cmd = container_of(header, typeof(*cmd), header);
2592 int ret;
2593 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2594 sizeof(SVGA3dRenderTargetViewId);
2595
2596 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2597 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2598 return -EINVAL;
2599 }
2600
2601 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2602 vmw_ctx_binding_ds, 0,
2603 &cmd->body.depthStencilViewId, 1, 0);
2604 if (ret)
2605 return ret;
2606
2607 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2608 vmw_ctx_binding_dx_rt, 0,
2609 (void *)&cmd[1], num_rt_view, 0);
2610}
2611
2612/**
2613 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2614 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2615 *
2616 * @dev_priv: Pointer to a device private struct.
2617 * @sw_context: The software context being used for this batch.
2618 * @header: Pointer to the command header in the command stream.
2619 */
2620static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2621 struct vmw_sw_context *sw_context,
2622 SVGA3dCmdHeader *header)
2623{
2624 struct {
2625 SVGA3dCmdHeader header;
2626 SVGA3dCmdDXClearRenderTargetView body;
2627 } *cmd = container_of(header, typeof(*cmd), header);
2628
2629 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2630 cmd->body.renderTargetViewId);
2631}
2632
2633/**
2634 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2635 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2636 *
2637 * @dev_priv: Pointer to a device private struct.
2638 * @sw_context: The software context being used for this batch.
2639 * @header: Pointer to the command header in the command stream.
2640 */
2641static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2642 struct vmw_sw_context *sw_context,
2643 SVGA3dCmdHeader *header)
2644{
2645 struct {
2646 SVGA3dCmdHeader header;
2647 SVGA3dCmdDXClearDepthStencilView body;
2648 } *cmd = container_of(header, typeof(*cmd), header);
2649
2650 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2651 cmd->body.depthStencilViewId);
2652}
2653
2654static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2655 struct vmw_sw_context *sw_context,
2656 SVGA3dCmdHeader *header)
2657{
2658 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2659 struct vmw_resource_val_node *srf_node;
2660 struct vmw_resource *res;
2661 enum vmw_view_type view_type;
2662 int ret;
2663 /*
2664 * This is based on the fact that all affected define commands have
2665 * the same initial command body layout.
2666 */
2667 struct {
2668 SVGA3dCmdHeader header;
2669 uint32 defined_id;
2670 uint32 sid;
2671 } *cmd;
2672
2673 if (unlikely(ctx_node == NULL)) {
2674 DRM_ERROR("DX Context not set.\n");
2675 return -EINVAL;
2676 }
2677
2678 view_type = vmw_view_cmd_to_type(header->id);
2679 cmd = container_of(header, typeof(*cmd), header);
2680 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2681 user_surface_converter,
2682 &cmd->sid, &srf_node);
2683 if (unlikely(ret != 0))
2684 return ret;
2685
2686 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2687 ret = vmw_cotable_notify(res, cmd->defined_id);
2688 vmw_resource_unreference(&res);
2689 if (unlikely(ret != 0))
2690 return ret;
2691
2692 return vmw_view_add(sw_context->man,
2693 ctx_node->res,
2694 srf_node->res,
2695 view_type,
2696 cmd->defined_id,
2697 header,
2698 header->size + sizeof(*header),
2699 &sw_context->staged_cmd_res);
2700}
2701
2702/**
2703 * vmw_cmd_dx_set_so_targets - Validate an
2704 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2705 *
2706 * @dev_priv: Pointer to a device private struct.
2707 * @sw_context: The software context being used for this batch.
2708 * @header: Pointer to the command header in the command stream.
2709 */
2710static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2711 struct vmw_sw_context *sw_context,
2712 SVGA3dCmdHeader *header)
2713{
2714 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2715 struct vmw_ctx_bindinfo_so binding;
2716 struct vmw_resource_val_node *res_node;
2717 struct {
2718 SVGA3dCmdHeader header;
2719 SVGA3dCmdDXSetSOTargets body;
2720 SVGA3dSoTarget targets[];
2721 } *cmd;
2722 int i, ret, num;
2723
2724 if (unlikely(ctx_node == NULL)) {
2725 DRM_ERROR("DX Context not set.\n");
2726 return -EINVAL;
2727 }
2728
2729 cmd = container_of(header, typeof(*cmd), header);
2730 num = (cmd->header.size - sizeof(cmd->body)) /
2731 sizeof(SVGA3dSoTarget);
2732
2733 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2734 DRM_ERROR("Invalid DX SO binding.\n");
2735 return -EINVAL;
2736 }
2737
2738 for (i = 0; i < num; i++) {
2739 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2740 user_surface_converter,
2741 &cmd->targets[i].sid, &res_node);
2742 if (unlikely(ret != 0))
2743 return ret;
2744
2745 binding.bi.ctx = ctx_node->res;
2746 binding.bi.res = ((res_node) ? res_node->res : NULL);
2747 binding.bi.bt = vmw_ctx_binding_so;
2748 binding.offset = cmd->targets[i].offset;
2749 binding.size = cmd->targets[i].sizeInBytes;
2750 binding.slot = i;
2751
2752 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2753 0, binding.slot);
2754 }
2755
2756 return 0;
2757}
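/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. Both vmw_cmd_dx_set_rendertargets() and the function above
 * derive the number of trailing variable-size elements from the header
 * size and bound-check it before iterating; a hedged userspace
 * rendering with hypothetical types:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hdr  { uint32_t id; uint32_t size; };  /* size excludes the header */
struct body { uint32_t flags; };
struct elem { uint32_t sid; uint32_t offset; };

/* Returns the trailing element count, or -1 on a malformed size. */
static int elem_count(const struct hdr *h, size_t max_elems)
{
	size_t payload = h->size;

	if (payload < sizeof(struct body))
		return -1;
	payload -= sizeof(struct body);
	if (payload % sizeof(struct elem) ||
	    payload / sizeof(struct elem) > max_elems)
		return -1;
	return (int)(payload / sizeof(struct elem));
}

int main(void)
{
	struct hdr h = { 0, sizeof(struct body) + 2 * sizeof(struct elem) };

	assert(elem_count(&h, 4) == 2);
	return 0;
}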
2758
2759static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2760 struct vmw_sw_context *sw_context,
2761 SVGA3dCmdHeader *header)
2762{
2763 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2764 struct vmw_resource *res;
2765 /*
2766 * This is based on the fact that all affected define commands have
2767 * the same initial command body layout.
2768 */
2769 struct {
2770 SVGA3dCmdHeader header;
2771 uint32 defined_id;
2772 } *cmd;
2773 enum vmw_so_type so_type;
2774 int ret;
2775
2776 if (unlikely(ctx_node == NULL)) {
2777 DRM_ERROR("DX Context not set.\n");
2778 return -EINVAL;
2779 }
2780
2781 so_type = vmw_so_cmd_to_type(header->id);
2782 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2783 cmd = container_of(header, typeof(*cmd), header);
2784 ret = vmw_cotable_notify(res, cmd->defined_id);
2785 vmw_resource_unreference(&res);
2786
2787 return ret;
2788}
2789
2790/**
2791 * vmw_cmd_dx_check_subresource - Validate an
2792 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2793 *
2794 * @dev_priv: Pointer to a device private struct.
2795 * @sw_context: The software context being used for this batch.
2796 * @header: Pointer to the command header in the command stream.
2797 */
2798static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2799 struct vmw_sw_context *sw_context,
2800 SVGA3dCmdHeader *header)
2801{
2802 struct {
2803 SVGA3dCmdHeader header;
2804 union {
2805 SVGA3dCmdDXReadbackSubResource r_body;
2806 SVGA3dCmdDXInvalidateSubResource i_body;
2807 SVGA3dCmdDXUpdateSubResource u_body;
2808 SVGA3dSurfaceId sid;
2809 };
2810 } *cmd;
2811
2812 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2813 offsetof(typeof(*cmd), sid));
2814 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2815 offsetof(typeof(*cmd), sid));
2816 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2817 offsetof(typeof(*cmd), sid));
2818
2819 cmd = container_of(header, typeof(*cmd), header);
2820
2821 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2822 user_surface_converter,
2823 &cmd->sid, NULL);
2824}
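/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. The BUILD_BUG_ON() checks above prove at compile time that
 * every subresource command variant keeps its sid at the same offset,
 * so a single union member can read them all; a C11 rendering with
 * hypothetical variants:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct readback   { uint32_t sid; uint32_t subres; };
struct invalidate { uint32_t sid; };

struct cmd {
	union {
		struct readback   r_body;
		struct invalidate i_body;
		uint32_t          sid;  /* alias for the shared field */
	};
};

_Static_assert(offsetof(struct cmd, r_body.sid) == offsetof(struct cmd, sid),
	       "readback sid must alias cmd.sid");
_Static_assert(offsetof(struct cmd, i_body.sid) == offsetof(struct cmd, sid),
	       "invalidate sid must alias cmd.sid");

int main(void)
{
	struct cmd c = { .sid = 42 };

	assert(c.r_body.sid == 42 && c.i_body.sid == 42);
	return 0;
}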
2825
2826static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2827 struct vmw_sw_context *sw_context,
2828 SVGA3dCmdHeader *header)
2829{
2830 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2831
2832 if (unlikely(ctx_node == NULL)) {
2833 DRM_ERROR("DX Context not set.\n");
2834 return -EINVAL;
2835 }
2836
2837 return 0;
2838}
2839
2840/**
2841 * vmw_cmd_dx_view_remove - validate a view remove command and
2842 * schedule the view resource for removal.
2843 *
2844 * @dev_priv: Pointer to a device private struct.
2845 * @sw_context: The software context being used for this batch.
2846 * @header: Pointer to the command header in the command stream.
2847 *
2848 * Check that the view exists, and if it was not created using this
2849 * command batch, make sure it's validated (present in the device) so that
2850 * the remove command will not confuse the device.
2851 */
2852static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2853 struct vmw_sw_context *sw_context,
2854 SVGA3dCmdHeader *header)
2855{
2856 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2857 struct {
2858 SVGA3dCmdHeader header;
2859 union vmw_view_destroy body;
2860 } *cmd = container_of(header, typeof(*cmd), header);
2861 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2862 struct vmw_resource *view;
2863 int ret;
2864
2865 if (!ctx_node) {
2866 DRM_ERROR("DX Context not set.\n");
2867 return -EINVAL;
2868 }
2869
2870 ret = vmw_view_remove(sw_context->man,
2871 cmd->body.view_id, view_type,
2872 &sw_context->staged_cmd_res,
2873 &view);
2874 if (ret || !view)
2875 return ret;
2876
2877 /*
2878 * Add view to the validate list iff it was not created using this
2879 * command batch.
2880 */
2881 return vmw_view_res_val_add(sw_context, view);
2882}
2883
2884/**
2885 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2886 * command
2887 *
2888 * @dev_priv: Pointer to a device private struct.
2889 * @sw_context: The software context being used for this batch.
2890 * @header: Pointer to the command header in the command stream.
2891 */
2892static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2893 struct vmw_sw_context *sw_context,
2894 SVGA3dCmdHeader *header)
2895{
2896 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2897 struct vmw_resource *res;
2898 struct {
2899 SVGA3dCmdHeader header;
2900 SVGA3dCmdDXDefineShader body;
2901 } *cmd = container_of(header, typeof(*cmd), header);
2902 int ret;
2903
2904 if (!ctx_node) {
2905 DRM_ERROR("DX Context not set.\n");
2906 return -EINVAL;
2907 }
2908
2909 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2910 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2911 vmw_resource_unreference(&res);
2912 if (ret)
2913 return ret;
2914
2915 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2916 cmd->body.shaderId, cmd->body.type,
2917 &sw_context->staged_cmd_res);
2918}
2919
2920/**
2921 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2922 * command
2923 *
2924 * @dev_priv: Pointer to a device private struct.
2925 * @sw_context: The software context being used for this batch.
2926 * @header: Pointer to the command header in the command stream.
2927 */
2928static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2929 struct vmw_sw_context *sw_context,
2930 SVGA3dCmdHeader *header)
2931{
2932 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2933 struct {
2934 SVGA3dCmdHeader header;
2935 SVGA3dCmdDXDestroyShader body;
2936 } *cmd = container_of(header, typeof(*cmd), header);
2937 int ret;
2938
2939 if (!ctx_node) {
2940 DRM_ERROR("DX Context not set.\n");
2941 return -EINVAL;
2942 }
2943
2944 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2945 &sw_context->staged_cmd_res);
2946 if (ret)
2947 DRM_ERROR("Could not find shader to remove.\n");
2948
2949 return ret;
2950}
2951
2952/**
2953 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2954 * command
2955 *
2956 * @dev_priv: Pointer to a device private struct.
2957 * @sw_context: The software context being used for this batch.
2958 * @header: Pointer to the command header in the command stream.
2959 */
2960static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2961 struct vmw_sw_context *sw_context,
2962 SVGA3dCmdHeader *header)
2963{
2964 struct vmw_resource_val_node *ctx_node;
2965 struct vmw_resource_val_node *res_node;
2966 struct vmw_resource *res;
2967 struct {
2968 SVGA3dCmdHeader header;
2969 SVGA3dCmdDXBindShader body;
2970 } *cmd = container_of(header, typeof(*cmd), header);
2971 int ret;
2972
2973 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2974 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2975 user_context_converter,
2976 &cmd->body.cid, &ctx_node);
2977 if (ret)
2978 return ret;
2979 } else {
2980 ctx_node = sw_context->dx_ctx_node;
2981 if (!ctx_node) {
2982 DRM_ERROR("DX Context not set.\n");
2983 return -EINVAL;
2984 }
2985 }
2986
2987 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2988 cmd->body.shid, 0);
2989 if (IS_ERR(res)) {
2990 DRM_ERROR("Could not find shader to bind.\n");
2991 return PTR_ERR(res);
2992 }
2993
2994 ret = vmw_resource_val_add(sw_context, res, &res_node);
2995 if (ret) {
2996 DRM_ERROR("Error creating resource validation node.\n");
2997 goto out_unref;
2998 }
2999
3000
3001 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3002 &cmd->body.mobid,
3003 cmd->body.offsetInBytes);
3004out_unref:
3005 vmw_resource_unreference(&res);
3006
3007 return ret;
3008}
3009
1845static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 3010static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1846 struct vmw_sw_context *sw_context, 3011 struct vmw_sw_context *sw_context,
1847 void *buf, uint32_t *size) 3012 void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1849 uint32_t size_remaining = *size; 3014 uint32_t size_remaining = *size;
1850 uint32_t cmd_id; 3015 uint32_t cmd_id;
1851 3016
1852 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); 3017 cmd_id = ((uint32_t *)buf)[0];
1853 switch (cmd_id) { 3018 switch (cmd_id) {
1854 case SVGA_CMD_UPDATE: 3019 case SVGA_CMD_UPDATE:
1855 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); 3020 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1980 false, false, true), 3145 false, false, true),
1981 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, 3146 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1982 false, false, true), 3147 false, false, true),
1983 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, 3148 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
1984 false, false, true), 3149 false, false, true),
1985 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, 3150 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1986 false, false, true), 3151 false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2051 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3216 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2052 false, false, true), 3217 false, false, true),
2053 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 3218 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2054 true, false, true) 3219 true, false, true),
3220 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3221 false, false, true),
3222 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3223 false, false, true),
3224 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3225 false, false, true),
3226 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3227 false, false, true),
3228
3229 /*
3230 * DX commands
3231 */
3232 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3233 false, false, true),
3234 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3235 false, false, true),
3236 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3237 false, false, true),
3238 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3239 false, false, true),
3240 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3241 false, false, true),
3242 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3243 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3244 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3245 &vmw_cmd_dx_set_shader_res, true, false, true),
3246 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3247 true, false, true),
3248 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3249 true, false, true),
3250 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3251 true, false, true),
3252 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3253 true, false, true),
3254 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3255 true, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3257 &vmw_cmd_dx_cid_check, true, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3259 true, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3261 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3263 &vmw_cmd_dx_set_index_buffer, true, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3265 &vmw_cmd_dx_set_rendertargets, true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3267 true, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3269 &vmw_cmd_dx_cid_check, true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3271 &vmw_cmd_dx_cid_check, true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3273 true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
3275 true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3277 true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3279 &vmw_cmd_ok, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
3281 true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
3283 true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3285 true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3287 true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3289 true, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3291 true, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3293 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3295 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3301 &vmw_cmd_dx_check_subresource, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3303 &vmw_cmd_dx_check_subresource, true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3305 &vmw_cmd_dx_check_subresource, true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3307 &vmw_cmd_dx_view_define, true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3309 &vmw_cmd_dx_view_remove, true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3311 &vmw_cmd_dx_view_define, true, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3313 &vmw_cmd_dx_view_remove, true, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3315 &vmw_cmd_dx_view_define, true, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3317 &vmw_cmd_dx_view_remove, true, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3319 &vmw_cmd_dx_so_define, true, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3321 &vmw_cmd_dx_cid_check, true, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3323 &vmw_cmd_dx_so_define, true, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3325 &vmw_cmd_dx_cid_check, true, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3327 &vmw_cmd_dx_so_define, true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3329 &vmw_cmd_dx_cid_check, true, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3331 &vmw_cmd_dx_so_define, true, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3333 &vmw_cmd_dx_cid_check, true, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3335 &vmw_cmd_dx_so_define, true, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3337 &vmw_cmd_dx_cid_check, true, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3339 &vmw_cmd_dx_define_shader, true, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3341 &vmw_cmd_dx_destroy_shader, true, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3343 &vmw_cmd_dx_bind_shader, true, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3345 &vmw_cmd_dx_so_define, true, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3347 &vmw_cmd_dx_cid_check, true, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3349 true, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3351 &vmw_cmd_dx_set_so_targets, true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3353 &vmw_cmd_dx_cid_check, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3355 &vmw_cmd_dx_cid_check, true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3357 &vmw_cmd_buffer_copy_check, true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3359 &vmw_cmd_pred_copy_check, true, false, true),
2055}; 3360};
2056 3361
2057static int vmw_cmd_check(struct vmw_private *dev_priv, 3362static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
2065 const struct vmw_cmd_entry *entry; 3370 const struct vmw_cmd_entry *entry;
2066 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; 3371 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2067 3372
2068 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); 3373 cmd_id = ((uint32_t *)buf)[0];
2069 /* Handle any non-3D commands */ 3374 /* Handle any non-3D commands */
2070 if (unlikely(cmd_id < SVGA_CMD_MAX)) 3375 if (unlikely(cmd_id < SVGA_CMD_MAX))
2071 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); 3376 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2072 3377
2073 3378
2074 cmd_id = le32_to_cpu(header->id); 3379 cmd_id = header->id;
2075 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); 3380 *size = header->size + sizeof(SVGA3dCmdHeader);
2076 3381
2077 cmd_id -= SVGA_3D_CMD_BASE; 3382 cmd_id -= SVGA_3D_CMD_BASE;
2078 if (unlikely(*size > size_remaining)) 3383 if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2184 * 3489 *
2185 * @list: The resource list. 3490 * @list: The resource list.
2186 */ 3491 */
2187static void vmw_resource_list_unreference(struct list_head *list) 3492static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3493 struct list_head *list)
2188{ 3494{
2189 struct vmw_resource_val_node *val, *val_next; 3495 struct vmw_resource_val_node *val, *val_next;
2190 3496
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
2195 list_for_each_entry_safe(val, val_next, list, head) { 3501 list_for_each_entry_safe(val, val_next, list, head) {
2196 list_del_init(&val->head); 3502 list_del_init(&val->head);
2197 vmw_resource_unreference(&val->res); 3503 vmw_resource_unreference(&val->res);
2198 if (unlikely(val->staged_bindings)) 3504
2199 kfree(val->staged_bindings); 3505 if (val->staged_bindings) {
3506 if (val->staged_bindings != sw_context->staged_bindings)
3507 vmw_binding_state_free(val->staged_bindings);
3508 else
3509 sw_context->staged_bindings_inuse = false;
3510 val->staged_bindings = NULL;
3511 }
3512
2200 kfree(val); 3513 kfree(val);
2201 } 3514 }
2202} 3515}
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2222 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); 3535 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2223} 3536}
2224 3537
2225static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 3538int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2226 struct ttm_buffer_object *bo, 3539 struct ttm_buffer_object *bo,
2227 bool validate_as_mob) 3540 bool interruptible,
3541 bool validate_as_mob)
2228{ 3542{
3543 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3544 base);
2229 int ret; 3545 int ret;
2230 3546
2231 3547 if (vbo->pin_count > 0)
2232 /*
2233 * Don't validate pinned buffers.
2234 */
2235
2236 if (bo == dev_priv->pinned_bo ||
2237 (bo == dev_priv->dummy_query_bo &&
2238 dev_priv->dummy_query_bo_pinned))
2239 return 0; 3548 return 0;
2240 3549
2241 if (validate_as_mob) 3550 if (validate_as_mob)
2242 return ttm_bo_validate(bo, &vmw_mob_placement, true, false); 3551 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3552 false);
2243 3553
2244 /** 3554 /**
2245 * Put BO in VRAM if there is space, otherwise as a GMR. 3555 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2248 * used as a GMR, this will return -ENOMEM. 3558 * used as a GMR, this will return -ENOMEM.
2249 */ 3559 */
2250 3560
2251 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); 3561 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3562 false);
2252 if (likely(ret == 0 || ret == -ERESTARTSYS)) 3563 if (likely(ret == 0 || ret == -ERESTARTSYS))
2253 return ret; 3564 return ret;
2254 3565
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2257 * previous contents. 3568 * previous contents.
2258 */ 3569 */
2259 3570
2260 DRM_INFO("Falling through to VRAM.\n"); 3571 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
2261 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2262 return ret; 3572 return ret;
2263} 3573}
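/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. The validation above tries placements in a fixed order: MOB
 * when requested, otherwise VRAM-or-GMR with a fallback to plain VRAM,
 * and -ERESTARTSYS always aborts the chain. try_place() is a
 * hypothetical stand-in for ttm_bo_validate():
 */
#include <errno.h>

enum placement { PLACE_MOB, PLACE_VRAM_GMR, PLACE_VRAM };

static int try_place(enum placement p)
{
	return (p == PLACE_VRAM) ? 0 : -ENOMEM;  /* stub: only VRAM fits */
}

static int validate(int as_mob)
{
	int ret;

	if (as_mob)
		return try_place(PLACE_MOB);

	ret = try_place(PLACE_VRAM_GMR);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;
	/* Last resort; may evict the buffer's previous contents. */
	return try_place(PLACE_VRAM);
}

int main(void)
{
	return validate(0) ? 1 : 0;
}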
2264 3574
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
2270 3580
2271 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { 3581 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2272 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, 3582 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3583 true,
2273 entry->validate_as_mob); 3584 entry->validate_as_mob);
2274 if (unlikely(ret != 0)) 3585 if (unlikely(ret != 0))
2275 return ret; 3586 return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2417 } 3728 }
2418} 3729}
2419 3730
3731/**
3732 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3733 * the fifo.
3734 *
3735 * @dev_priv: Pointer to a device private structure.
3736 * @kernel_commands: Pointer to the unpatched command batch.
3737 * @command_size: Size of the unpatched command batch.
3738 * @sw_context: Structure holding the relocation lists.
3739 *
3740 * Side effects: If this function returns 0, then the command batch
3741 * pointed to by @kernel_commands will have been modified.
3742 */
3743static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3744 void *kernel_commands,
3745 u32 command_size,
3746 struct vmw_sw_context *sw_context)
3747{
3748 void *cmd;
2420 3749
3750 if (sw_context->dx_ctx_node)
3751 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3752 sw_context->dx_ctx_node->res->id);
3753 else
3754 cmd = vmw_fifo_reserve(dev_priv, command_size);
3755 if (!cmd) {
3756 DRM_ERROR("Failed reserving fifo space for commands.\n");
3757 return -ENOMEM;
3758 }
3759
3760 vmw_apply_relocations(sw_context);
3761 memcpy(cmd, kernel_commands, command_size);
3762 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3763 vmw_resource_relocations_free(&sw_context->res_relocations);
3764 vmw_fifo_commit(dev_priv, command_size);
3765
3766 return 0;
3767}
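/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. The submit path above copies the batch into reserved ring
 * space and then patches resource relocations in the copy before
 * committing; a minimal userspace rendering of that copy-then-patch
 * order, with a hypothetical relocation record:
 */
#include <stdint.h>
#include <string.h>

struct reloc { size_t offset; uint32_t value; };

static void patch_and_submit(uint8_t *ring, const uint8_t *batch,
			     size_t size, const struct reloc *relocs,
			     size_t num_relocs)
{
	size_t i;

	memcpy(ring, batch, size);
	for (i = 0; i < num_relocs; i++)
		memcpy(ring + relocs[i].offset, &relocs[i].value,
		       sizeof(uint32_t));
	/* ...a real submit would now commit 'size' bytes of the ring. */
}

int main(void)
{
	uint8_t batch[8] = { 0 }, ring[8];
	struct reloc r = { .offset = 4, .value = 0xdeadbeef };

	patch_and_submit(ring, batch, sizeof(batch), &r, 1);
	return 0;
}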
3768
3769/**
3770 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3771 * the command buffer manager.
3772 *
3773 * @dev_priv: Pointer to a device private structure.
3774 * @header: Opaque handle to the command buffer allocation.
3775 * @command_size: Size of the unpatched command batch.
3776 * @sw_context: Structure holding the relocation lists.
3777 *
3778 * Side effects: If this function returns 0, then the command buffer
3779 * represented by @header will have been modified.
3780 */
3781static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3782 struct vmw_cmdbuf_header *header,
3783 u32 command_size,
3784 struct vmw_sw_context *sw_context)
3785{
3786 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3787 SVGA3D_INVALID_ID);
3788 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3789 id, false, header);
3790
3791 vmw_apply_relocations(sw_context);
3792 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3793 vmw_resource_relocations_free(&sw_context->res_relocations);
3794 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3795
3796 return 0;
3797}
3798
3799/**
3800 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3801 * submission using a command buffer.
3802 *
3803 * @dev_priv: Pointer to a device private structure.
3804 * @user_commands: User-space pointer to the commands to be submitted.
3805 * @command_size: Size of the unpatched command batch.
3806 * @header: Out parameter returning the opaque pointer to the command buffer.
3807 *
3808 * This function checks whether we can use the command buffer manager for
3809 * submission and if so, creates a command buffer of suitable size and
3810 * copies the user data into that buffer.
3811 *
3812 * On successful return, the function returns a pointer to the data in the
3813 * command buffer and *@header is set to non-NULL.
3814 * If command buffers could not be used, the function returns the value of
3815 * @kernel_commands it was passed. That value may be NULL. In that case,
3816 * the value of *@header will be set to NULL.
3817 * If an error is encountered, the function will return a pointer error value.
3818 * If the function is interrupted by a signal while sleeping, it will return
3819 * -ERESTARTSYS cast to a pointer error value.
3820 */
3821static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3822 void __user *user_commands,
3823 void *kernel_commands,
3824 u32 command_size,
3825 struct vmw_cmdbuf_header **header)
3826{
3827 size_t cmdbuf_size;
3828 int ret;
3829
3830 *header = NULL;
3831 if (!dev_priv->cman || kernel_commands)
3832 return kernel_commands;
3833
3834 if (command_size > SVGA_CB_MAX_SIZE) {
3835 DRM_ERROR("Command buffer is too large.\n");
3836 return ERR_PTR(-EINVAL);
3837 }
3838
3839 /* If possible, add a little space for fencing. */
3840 cmdbuf_size = command_size + 512;
3841 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3842 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3843 true, header);
3844 if (IS_ERR(kernel_commands))
3845 return kernel_commands;
3846
3847 ret = copy_from_user(kernel_commands, user_commands,
3848 command_size);
3849 if (ret) {
3850 DRM_ERROR("Failed copying commands.\n");
3851 vmw_cmdbuf_header_free(*header);
3852 *header = NULL;
3853 return ERR_PTR(-EFAULT);
3854 }
3855
3856 return kernel_commands;
3857}
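/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. vmw_execbuf_cmdbuf() has a tri-state contract: pass through a
 * caller-supplied batch, return an allocation with *header set, or
 * return an error pointer with *header NULL. Minimal userspace
 * stand-ins for the kernel's ERR_PTR helpers make that testable:
 */
#include <errno.h>
#include <stdio.h>

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static char pool[64];  /* stands in for the command buffer manager */

static void *prepare(void *kernel_cmds, int fail, void **header)
{
	*header = NULL;
	if (kernel_cmds)
		return kernel_cmds;       /* passthrough: no header */
	if (fail)
		return ERR_PTR(-ENOMEM);  /* error pointer, header stays NULL */
	*header = pool;                   /* opaque handle to free later */
	return pool;
}

int main(void)
{
	void *h;
	void *p = prepare(NULL, 1, &h);

	if (IS_ERR(p))
		printf("prepare failed: %ld, header=%p\n", PTR_ERR(p), h);
	return 0;
}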
3858
3859static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3860 struct vmw_sw_context *sw_context,
3861 uint32_t handle)
3862{
3863 struct vmw_resource_val_node *ctx_node;
3864 struct vmw_resource *res;
3865 int ret;
3866
3867 if (handle == SVGA3D_INVALID_ID)
3868 return 0;
3869
3870 ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3871 handle, user_context_converter,
3872 &res);
3873 if (unlikely(ret != 0)) {
3874 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3875 (unsigned) handle);
3876 return ret;
3877 }
3878
3879 ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3880 if (unlikely(ret != 0))
3881 goto out_err;
3882
3883 sw_context->dx_ctx_node = ctx_node;
3884 sw_context->man = vmw_context_res_man(res);
3885out_err:
3886 vmw_resource_unreference(&res);
3887 return ret;
3888}
2421 3889
2422int vmw_execbuf_process(struct drm_file *file_priv, 3890int vmw_execbuf_process(struct drm_file *file_priv,
2423 struct vmw_private *dev_priv, 3891 struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2425 void *kernel_commands, 3893 void *kernel_commands,
2426 uint32_t command_size, 3894 uint32_t command_size,
2427 uint64_t throttle_us, 3895 uint64_t throttle_us,
3896 uint32_t dx_context_handle,
2428 struct drm_vmw_fence_rep __user *user_fence_rep, 3897 struct drm_vmw_fence_rep __user *user_fence_rep,
2429 struct vmw_fence_obj **out_fence) 3898 struct vmw_fence_obj **out_fence)
2430{ 3899{
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2432 struct vmw_fence_obj *fence = NULL; 3901 struct vmw_fence_obj *fence = NULL;
2433 struct vmw_resource *error_resource; 3902 struct vmw_resource *error_resource;
2434 struct list_head resource_list; 3903 struct list_head resource_list;
3904 struct vmw_cmdbuf_header *header;
2435 struct ww_acquire_ctx ticket; 3905 struct ww_acquire_ctx ticket;
2436 uint32_t handle; 3906 uint32_t handle;
2437 void *cmd;
2438 int ret; 3907 int ret;
2439 3908
3909 if (throttle_us) {
3910 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3911 throttle_us);
3912
3913 if (ret)
3914 return ret;
3915 }
3916
3917 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3918 kernel_commands, command_size,
3919 &header);
3920 if (IS_ERR(kernel_commands))
3921 return PTR_ERR(kernel_commands);
3922
2440 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); 3923 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2441 if (unlikely(ret != 0)) 3924 if (ret) {
2442 return -ERESTARTSYS; 3925 ret = -ERESTARTSYS;
3926 goto out_free_header;
3927 }
2443 3928
3929 sw_context->kernel = false;
2444 if (kernel_commands == NULL) { 3930 if (kernel_commands == NULL) {
2445 sw_context->kernel = false;
2446
2447 ret = vmw_resize_cmd_bounce(sw_context, command_size); 3931 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2448 if (unlikely(ret != 0)) 3932 if (unlikely(ret != 0))
2449 goto out_unlock; 3933 goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2458 goto out_unlock; 3942 goto out_unlock;
2459 } 3943 }
2460 kernel_commands = sw_context->cmd_bounce; 3944 kernel_commands = sw_context->cmd_bounce;
2461 } else 3945 } else if (!header)
2462 sw_context->kernel = true; 3946 sw_context->kernel = true;
2463 3947
2464 sw_context->fp = vmw_fpriv(file_priv); 3948 sw_context->fp = vmw_fpriv(file_priv);
2465 sw_context->cur_reloc = 0; 3949 sw_context->cur_reloc = 0;
2466 sw_context->cur_val_buf = 0; 3950 sw_context->cur_val_buf = 0;
2467 INIT_LIST_HEAD(&sw_context->resource_list); 3951 INIT_LIST_HEAD(&sw_context->resource_list);
3952 INIT_LIST_HEAD(&sw_context->ctx_resource_list);
2468 sw_context->cur_query_bo = dev_priv->pinned_bo; 3953 sw_context->cur_query_bo = dev_priv->pinned_bo;
2469 sw_context->last_query_ctx = NULL; 3954 sw_context->last_query_ctx = NULL;
2470 sw_context->needs_post_query_barrier = false; 3955 sw_context->needs_post_query_barrier = false;
3956 sw_context->dx_ctx_node = NULL;
3957 sw_context->dx_query_mob = NULL;
3958 sw_context->dx_query_ctx = NULL;
2471 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); 3959 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2472 INIT_LIST_HEAD(&sw_context->validate_nodes); 3960 INIT_LIST_HEAD(&sw_context->validate_nodes);
2473 INIT_LIST_HEAD(&sw_context->res_relocations); 3961 INIT_LIST_HEAD(&sw_context->res_relocations);
3962 if (sw_context->staged_bindings)
3963 vmw_binding_state_reset(sw_context->staged_bindings);
3964
2474 if (!sw_context->res_ht_initialized) { 3965 if (!sw_context->res_ht_initialized) {
2475 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 3966 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2476 if (unlikely(ret != 0)) 3967 if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2478 sw_context->res_ht_initialized = true; 3969 sw_context->res_ht_initialized = true;
2479 } 3970 }
2480 INIT_LIST_HEAD(&sw_context->staged_cmd_res); 3971 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2481
2482 INIT_LIST_HEAD(&resource_list); 3972 INIT_LIST_HEAD(&resource_list);
3973 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3974 if (unlikely(ret != 0)) {
3975 list_splice_init(&sw_context->ctx_resource_list,
3976 &sw_context->resource_list);
3977 goto out_err_nores;
3978 }
3979
2483 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 3980 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2484 command_size); 3981 command_size);
3982 /*
3983 * Merge the resource lists before checking the return status
3984 * from vmw_cmd_check_all so that all the open hashtabs will
3985 * be handled properly even if vmw_cmd_check_all fails.
3986 */
3987 list_splice_init(&sw_context->ctx_resource_list,
3988 &sw_context->resource_list);
3989
2485 if (unlikely(ret != 0)) 3990 if (unlikely(ret != 0))
2486 goto out_err_nores; 3991 goto out_err_nores;
2487 3992
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2502 if (unlikely(ret != 0)) 4007 if (unlikely(ret != 0))
2503 goto out_err; 4008 goto out_err;
2504 4009
2505 if (throttle_us) {
2506 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2507 throttle_us);
2508
2509 if (unlikely(ret != 0))
2510 goto out_err;
2511 }
2512
2513 ret = mutex_lock_interruptible(&dev_priv->binding_mutex); 4010 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2514 if (unlikely(ret != 0)) { 4011 if (unlikely(ret != 0)) {
2515 ret = -ERESTARTSYS; 4012 ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2522 goto out_unlock_binding; 4019 goto out_unlock_binding;
2523 } 4020 }
2524 4021
2525 cmd = vmw_fifo_reserve(dev_priv, command_size); 4022 if (!header) {
2526 if (unlikely(cmd == NULL)) { 4023 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
2527 DRM_ERROR("Failed reserving fifo space for commands.\n"); 4024 command_size, sw_context);
2528 ret = -ENOMEM; 4025 } else {
2529 goto out_unlock_binding; 4026 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4027 sw_context);
4028 header = NULL;
2530 } 4029 }
2531
2532 vmw_apply_relocations(sw_context);
2533 memcpy(cmd, kernel_commands, command_size);
2534
2535 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2536 vmw_resource_relocations_free(&sw_context->res_relocations);
2537
2538 vmw_fifo_commit(dev_priv, command_size);
2539 mutex_unlock(&dev_priv->binding_mutex); 4030 mutex_unlock(&dev_priv->binding_mutex);
4031 if (ret)
4032 goto out_err;
2540 4033
2541 vmw_query_bo_switch_commit(dev_priv, sw_context); 4034 vmw_query_bo_switch_commit(dev_priv, sw_context);
2542 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, 4035 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2551 if (ret != 0) 4044 if (ret != 0)
2552 DRM_ERROR("Fence submission error. Syncing.\n"); 4045 DRM_ERROR("Fence submission error. Syncing.\n");
2553 4046
2554 vmw_resource_list_unreserve(&sw_context->resource_list, false); 4047 vmw_resources_unreserve(sw_context, false);
2555 4048
2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 4049 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557 (void *) fence); 4050 (void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2580 * Unreference resources outside of the cmdbuf_mutex to 4073 * Unreference resources outside of the cmdbuf_mutex to
2581 * avoid deadlocks in resource destruction paths. 4074 * avoid deadlocks in resource destruction paths.
2582 */ 4075 */
2583 vmw_resource_list_unreference(&resource_list); 4076 vmw_resource_list_unreference(sw_context, &resource_list);
2584 4077
2585 return 0; 4078 return 0;
2586 4079
@@ -2589,7 +4082,7 @@ out_unlock_binding:
2589out_err: 4082out_err:
2590 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); 4083 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2591out_err_nores: 4084out_err_nores:
2592 vmw_resource_list_unreserve(&sw_context->resource_list, true); 4085 vmw_resources_unreserve(sw_context, true);
2593 vmw_resource_relocations_free(&sw_context->res_relocations); 4086 vmw_resource_relocations_free(&sw_context->res_relocations);
2594 vmw_free_relocations(sw_context); 4087 vmw_free_relocations(sw_context);
2595 vmw_clear_validations(sw_context); 4088 vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
2607 * Unreference resources outside of the cmdbuf_mutex to 4100 * Unreference resources outside of the cmdbuf_mutex to
2608 * avoid deadlocks in resource destruction paths. 4101 * avoid deadlocks in resource destruction paths.
2609 */ 4102 */
2610 vmw_resource_list_unreference(&resource_list); 4103 vmw_resource_list_unreference(sw_context, &resource_list);
2611 if (unlikely(error_resource != NULL)) 4104 if (unlikely(error_resource != NULL))
2612 vmw_resource_unreference(&error_resource); 4105 vmw_resource_unreference(&error_resource);
4106out_free_header:
4107 if (header)
4108 vmw_cmdbuf_header_free(header);
2613 4109
2614 return ret; 4110 return ret;
2615} 4111}
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2628 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); 4124 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2629 4125
2630 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); 4126 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2631 vmw_bo_pin(dev_priv->pinned_bo, false); 4127 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2632 vmw_bo_pin(dev_priv->dummy_query_bo, false); 4128 if (dev_priv->dummy_query_bo_pinned) {
2633 dev_priv->dummy_query_bo_pinned = false; 4129 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4130 dev_priv->dummy_query_bo_pinned = false;
4131 }
2634} 4132}
2635 4133
2636 4134
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2672 4170
2673 INIT_LIST_HEAD(&validate_list); 4171 INIT_LIST_HEAD(&validate_list);
2674 4172
2675 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); 4173 pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
2676 pinned_val.shared = false; 4174 pinned_val.shared = false;
2677 list_add_tail(&pinned_val.head, &validate_list); 4175 list_add_tail(&pinned_val.head, &validate_list);
2678 4176
2679 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); 4177 query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
2680 query_val.shared = false; 4178 query_val.shared = false;
2681 list_add_tail(&query_val.head, &validate_list); 4179 list_add_tail(&query_val.head, &validate_list);
2682 4180
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2697 dev_priv->query_cid_valid = false; 4195 dev_priv->query_cid_valid = false;
2698 } 4196 }
2699 4197
2700 vmw_bo_pin(dev_priv->pinned_bo, false); 4198 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2701 vmw_bo_pin(dev_priv->dummy_query_bo, false); 4199 if (dev_priv->dummy_query_bo_pinned) {
2702 dev_priv->dummy_query_bo_pinned = false; 4200 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2703 4201 dev_priv->dummy_query_bo_pinned = false;
4202 }
2704 if (fence == NULL) { 4203 if (fence == NULL) {
2705 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, 4204 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2706 NULL); 4205 NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2712 4211
2713 ttm_bo_unref(&query_val.bo); 4212 ttm_bo_unref(&query_val.bo);
2714 ttm_bo_unref(&pinned_val.bo); 4213 ttm_bo_unref(&pinned_val.bo);
2715 ttm_bo_unref(&dev_priv->pinned_bo); 4214 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4215 DRM_INFO("Dummy query bo pin count: %d\n",
4216 dev_priv->dummy_query_bo->pin_count);
2716 4217
2717out_unlock: 4218out_unlock:
2718 return; 4219 return;
@@ -2722,7 +4223,7 @@ out_no_emit:
2722out_no_reserve: 4223out_no_reserve:
2723 ttm_bo_unref(&query_val.bo); 4224 ttm_bo_unref(&query_val.bo);
2724 ttm_bo_unref(&pinned_val.bo); 4225 ttm_bo_unref(&pinned_val.bo);
2725 ttm_bo_unref(&dev_priv->pinned_bo); 4226 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2726} 4227}
2727 4228
2728/** 4229/**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2751 mutex_unlock(&dev_priv->cmdbuf_mutex); 4252 mutex_unlock(&dev_priv->cmdbuf_mutex);
2752} 4253}
2753 4254
2754 4255int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
2755int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 4256 struct drm_file *file_priv, size_t size)
2756 struct drm_file *file_priv)
2757{ 4257{
2758 struct vmw_private *dev_priv = vmw_priv(dev); 4258 struct vmw_private *dev_priv = vmw_priv(dev);
2759 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; 4259 struct drm_vmw_execbuf_arg arg;
2760 int ret; 4260 int ret;
4261 static const size_t copy_offset[] = {
4262 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4263 sizeof(struct drm_vmw_execbuf_arg)};
4264
4265 if (unlikely(size < copy_offset[0])) {
4266 DRM_ERROR("Invalid command size, ioctl %d\n",
4267 DRM_VMW_EXECBUF);
4268 return -EINVAL;
4269 }
4270
4271 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4272 return -EFAULT;
2761 4273
2762 /* 4274 /*
2763 * This will allow us to extend the ioctl argument while 4275 * Extend the ioctl argument while
2764 * maintaining backwards compatibility: 4276 * maintaining backwards compatibility:
2765 * We take different code paths depending on the value of 4277 * We take different code paths depending on the value of
2766 * arg->version. 4278 * arg.version.
2767 */ 4279 */
2768 4280
2769 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { 4281 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4282 arg.version == 0)) {
2770 DRM_ERROR("Incorrect execbuf version.\n"); 4283 DRM_ERROR("Incorrect execbuf version.\n");
2771 DRM_ERROR("You're running outdated experimental "
2772 "vmwgfx user-space drivers.");
2773 return -EINVAL; 4284 return -EINVAL;
2774 } 4285 }
2775 4286
4287 if (arg.version > 1 &&
4288 copy_from_user(&arg.context_handle,
4289 (void __user *) (data + copy_offset[0]),
4290 copy_offset[arg.version - 1] -
4291 copy_offset[0]) != 0)
4292 return -EFAULT;
4293
4294 switch (arg.version) {
4295 case 1:
4296 arg.context_handle = (uint32_t) -1;
4297 break;
4298 case 2:
4299 if (arg.pad64 != 0) {
4300 DRM_ERROR("Unused IOCTL data not set to zero.\n");
4301 return -EINVAL;
4302 }
4303 break;
4304 default:
4305 break;
4306 }
4307
2776 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 4308 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2777 if (unlikely(ret != 0)) 4309 if (unlikely(ret != 0))
2778 return ret; 4310 return ret;
2779 4311
2780 ret = vmw_execbuf_process(file_priv, dev_priv, 4312 ret = vmw_execbuf_process(file_priv, dev_priv,
2781 (void __user *)(unsigned long)arg->commands, 4313 (void __user *)(unsigned long)arg.commands,
2782 NULL, arg->command_size, arg->throttle_us, 4314 NULL, arg.command_size, arg.throttle_us,
2783 (void __user *)(unsigned long)arg->fence_rep, 4315 arg.context_handle,
4316 (void __user *)(unsigned long)arg.fence_rep,
2784 NULL); 4317 NULL);
2785 ttm_read_unlock(&dev_priv->reservation_sem); 4318 ttm_read_unlock(&dev_priv->reservation_sem);
2786 if (unlikely(ret != 0)) 4319 if (unlikely(ret != 0))
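/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. The ioctl above copies only the bytes the caller's argument
 * version defines, using a copy_offset[] table, and fills legacy
 * defaults for version 1; a hedged userspace rendering with a
 * hypothetical argument layout:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct args {
	uint32_t version;
	uint32_t command_size;     /* ...rest of the v1 layout... */
	uint32_t context_handle;   /* added in v2 */
	uint32_t pad64;
};

static const size_t copy_offset[] = {
	offsetof(struct args, context_handle),  /* end of v1 */
	sizeof(struct args),                    /* end of v2 */
};

static int fetch_args(struct args *dst, const void *user)
{
	memset(dst, 0, sizeof(*dst));
	memcpy(dst, user, copy_offset[0]);      /* always-present prefix */
	if (dst->version == 0 || dst->version > 2)
		return -1;
	if (dst->version > 1)
		memcpy((char *)dst + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[dst->version - 1] - copy_offset[0]);
	if (dst->version == 1)
		dst->context_handle = (uint32_t)-1;  /* legacy default */
	return 0;
}

int main(void)
{
	struct args in = { .version = 2, .context_handle = 5 }, out;

	return fetch_args(&out, &in) || out.context_handle != 5;
}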
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f391fad..042c5b4c706c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2007 David Airlie 3 * Copyright © 2007 David Airlie
4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 4 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved. 5 * All Rights Reserved.
6 * 6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a 7 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "vmwgfx_drv.h" 32#include "vmwgfx_drv.h"
33#include "vmwgfx_kms.h"
33 34
34#include <drm/ttm/ttm_placement.h> 35#include <drm/ttm/ttm_placement.h>
35 36
@@ -40,21 +41,22 @@ struct vmw_fb_par {
40 41
41 void *vmalloc; 42 void *vmalloc;
42 43
44 struct mutex bo_mutex;
43 struct vmw_dma_buffer *vmw_bo; 45 struct vmw_dma_buffer *vmw_bo;
44 struct ttm_bo_kmap_obj map; 46 struct ttm_bo_kmap_obj map;
47 void *bo_ptr;
48 unsigned bo_size;
49 struct drm_framebuffer *set_fb;
50 struct drm_display_mode *set_mode;
51 u32 fb_x;
52 u32 fb_y;
53 bool bo_iowrite;
45 54
46 u32 pseudo_palette[17]; 55 u32 pseudo_palette[17];
47 56
48 unsigned depth;
49 unsigned bpp;
50
51 unsigned max_width; 57 unsigned max_width;
52 unsigned max_height; 58 unsigned max_height;
53 59
54 void *bo_ptr;
55 unsigned bo_size;
56 bool bo_iowrite;
57
58 struct { 60 struct {
59 spinlock_t lock; 61 spinlock_t lock;
60 bool active; 62 bool active;
@@ -63,6 +65,11 @@ struct vmw_fb_par {
63 unsigned x2; 65 unsigned x2;
64 unsigned y2; 66 unsigned y2;
65 } dirty; 67 } dirty;
68
69 struct drm_crtc *crtc;
70 struct drm_connector *con;
71
72 bool local_mode;
66}; 73};
67 74
68static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, 75static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +84,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
77 return 1; 84 return 1;
78 } 85 }
79 86
80 switch (par->depth) { 87 switch (par->set_fb->depth) {
81 case 24: 88 case 24:
82 case 32: 89 case 32:
83 pal[regno] = ((red & 0xff00) << 8) | 90 pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +92,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
85 ((blue & 0xff00) >> 8); 92 ((blue & 0xff00) >> 8);
86 break; 93 break;
87 default: 94 default:
88 DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); 95 DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
96 par->set_fb->bits_per_pixel);
89 return 1; 97 return 1;
90 } 98 }
91 99
@@ -134,12 +142,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
134 return -EINVAL; 142 return -EINVAL;
135 } 143 }
136 144
137 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
138 (var->xoffset != 0 || var->yoffset != 0)) {
139 DRM_ERROR("Can not handle panning without display topology\n");
140 return -EINVAL;
141 }
142
143 if ((var->xoffset + var->xres) > par->max_width || 145 if ((var->xoffset + var->xres) > par->max_width ||
144 (var->yoffset + var->yres) > par->max_height) { 146 (var->yoffset + var->yres) > par->max_height) {
145 DRM_ERROR("Requested geom can not fit in framebuffer\n"); 147 DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +158,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
156 return 0; 158 return 0;
157} 159}
158 160
159static int vmw_fb_set_par(struct fb_info *info)
160{
161 struct vmw_fb_par *par = info->par;
162 struct vmw_private *vmw_priv = par->vmw_priv;
163 int ret;
164
165 info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
166
167 ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
168 info->fix.line_length,
169 par->bpp, par->depth);
170 if (ret)
171 return ret;
172
173 if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
174 /* TODO check if pitch and offset changes */
175 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
176 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
177 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
178 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
183 }
184
185 /* This is really helpful since if this fails the user
186 * can probably not see anything on the screen.
187 */
188 WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
189
190 return 0;
191}
192
193static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
194 struct fb_info *info)
195{
196 return 0;
197}
198
199static int vmw_fb_blank(int blank, struct fb_info *info) 161static int vmw_fb_blank(int blank, struct fb_info *info)
200{ 162{
201 return 0; 163 return 0;
@@ -209,54 +171,77 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
209{ 171{
210 struct vmw_private *vmw_priv = par->vmw_priv; 172 struct vmw_private *vmw_priv = par->vmw_priv;
211 struct fb_info *info = vmw_priv->fb_info; 173 struct fb_info *info = vmw_priv->fb_info;
212 int stride = (info->fix.line_length / 4); 174 unsigned long irq_flags;
213 int *src = (int *)info->screen_base; 175 s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
214 __le32 __iomem *vram_mem = par->bo_ptr; 176 u32 cpp, max_x, max_y;
215 unsigned long flags; 177 struct drm_clip_rect clip;
216 unsigned x, y, w, h; 178 struct drm_framebuffer *cur_fb;
217 int i, k; 179 u8 *src_ptr, *dst_ptr;
218 struct {
219 uint32_t header;
220 SVGAFifoCmdUpdate body;
221 } *cmd;
222 180
223 if (vmw_priv->suspended) 181 if (vmw_priv->suspended)
224 return; 182 return;
225 183
226 spin_lock_irqsave(&par->dirty.lock, flags); 184 mutex_lock(&par->bo_mutex);
227 if (!par->dirty.active) { 185 cur_fb = par->set_fb;
228 spin_unlock_irqrestore(&par->dirty.lock, flags); 186 if (!cur_fb)
229 return; 187 goto out_unlock;
230 }
231 x = par->dirty.x1;
232 y = par->dirty.y1;
233 w = min(par->dirty.x2, info->var.xres) - x;
234 h = min(par->dirty.y2, info->var.yres) - y;
235 par->dirty.x1 = par->dirty.x2 = 0;
236 par->dirty.y1 = par->dirty.y2 = 0;
237 spin_unlock_irqrestore(&par->dirty.lock, flags);
238 188
239 for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { 189 spin_lock_irqsave(&par->dirty.lock, irq_flags);
240 for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) 190 if (!par->dirty.active) {
241 iowrite32(src[k], vram_mem + k); 191 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
192 goto out_unlock;
242 } 193 }
243 194
244#if 0 195 /*
245 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); 196 * Handle panning when copying from vmalloc to framebuffer.
246#endif 197 * Clip dirty area to framebuffer.
198 */
199 cpp = (cur_fb->bits_per_pixel + 7) / 8;
200 max_x = par->fb_x + cur_fb->width;
201 max_y = par->fb_y + cur_fb->height;
202
203 dst_x1 = par->dirty.x1 - par->fb_x;
204 dst_y1 = par->dirty.y1 - par->fb_y;
205 dst_x1 = max_t(s32, dst_x1, 0);
206 dst_y1 = max_t(s32, dst_y1, 0);
207
208 dst_x2 = par->dirty.x2 - par->fb_x;
209 dst_y2 = par->dirty.y2 - par->fb_y;
210 dst_x2 = min_t(s32, dst_x2, max_x);
211 dst_y2 = min_t(s32, dst_y2, max_y);
212 w = dst_x2 - dst_x1;
213 h = dst_y2 - dst_y1;
214 w = max_t(s32, 0, w);
215 h = max_t(s32, 0, h);
247 216
248 cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); 217 par->dirty.x1 = par->dirty.x2 = 0;
249 if (unlikely(cmd == NULL)) { 218 par->dirty.y1 = par->dirty.y2 = 0;
250 DRM_ERROR("Fifo reserve failed.\n"); 219 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
251 return; 220
221 if (w && h) {
222 dst_ptr = (u8 *)par->bo_ptr +
223 (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
224 src_ptr = (u8 *)par->vmalloc +
225 ((dst_y1 + par->fb_y) * info->fix.line_length +
226 (dst_x1 + par->fb_x) * cpp);
227
228 while (h-- > 0) {
229 memcpy(dst_ptr, src_ptr, w*cpp);
230 dst_ptr += par->set_fb->pitches[0];
231 src_ptr += info->fix.line_length;
232 }
233
234 clip.x1 = dst_x1;
235 clip.x2 = dst_x2;
236 clip.y1 = dst_y1;
237 clip.y2 = dst_y2;
238
239 WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
240 &clip, 1));
241 vmw_fifo_flush(vmw_priv, false);
252 } 242 }
253 243out_unlock:
254 cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); 244 mutex_unlock(&par->bo_mutex);
255 cmd->body.x = cpu_to_le32(x);
256 cmd->body.y = cpu_to_le32(y);
257 cmd->body.width = cpu_to_le32(w);
258 cmd->body.height = cpu_to_le32(h);
259 vmw_fifo_commit(vmw_priv, sizeof(*cmd));
260} 245}
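/*
 * Editor's note: an illustrative, standalone sketch, not part of the
 * patch. The flush above translates the dirty rectangle from panned
 * (virtual) coordinates into framebuffer coordinates and clips it to
 * the framebuffer bounds before copying; a simplified
 * clamp-and-translate step:
 */
#include <stdint.h>
#include <stdio.h>

struct rect { int32_t x1, y1, x2, y2; };

static int32_t clamp32(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Returns nonzero when a non-empty region remains to copy. */
static int clip_dirty(struct rect *d, int32_t fb_x, int32_t fb_y,
		      int32_t fb_w, int32_t fb_h)
{
	d->x1 = clamp32(d->x1 - fb_x, 0, fb_w);
	d->y1 = clamp32(d->y1 - fb_y, 0, fb_h);
	d->x2 = clamp32(d->x2 - fb_x, 0, fb_w);
	d->y2 = clamp32(d->y2 - fb_y, 0, fb_h);
	return d->x2 > d->x1 && d->y2 > d->y1;
}

int main(void)
{
	struct rect r = { 10, 10, 200, 120 };

	if (clip_dirty(&r, 0, 0, 100, 100))
		printf("copy %dx%d at (%d,%d)\n",
		       r.x2 - r.x1, r.y2 - r.y1, r.x1, r.y1);
	return 0;
}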
261 246
262static void vmw_fb_dirty_mark(struct vmw_fb_par *par, 247static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
@@ -291,6 +276,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
291 spin_unlock_irqrestore(&par->dirty.lock, flags); 276 spin_unlock_irqrestore(&par->dirty.lock, flags);
292} 277}
293 278
279static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
280 struct fb_info *info)
281{
282 struct vmw_fb_par *par = info->par;
283
284 if ((var->xoffset + var->xres) > var->xres_virtual ||
285 (var->yoffset + var->yres) > var->yres_virtual) {
286 DRM_ERROR("Requested panning cannot fit in framebuffer\n");
287 return -EINVAL;
288 }
289
290 mutex_lock(&par->bo_mutex);
291 par->fb_x = var->xoffset;
292 par->fb_y = var->yoffset;
293 if (par->set_fb)
294 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
295 par->set_fb->height);
296 mutex_unlock(&par->bo_mutex);
297
298 return 0;
299}
300
294static void vmw_deferred_io(struct fb_info *info, 301static void vmw_deferred_io(struct fb_info *info,
295 struct list_head *pagelist) 302 struct list_head *pagelist)
296{ 303{
@@ -324,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info,
324 vmw_fb_dirty_flush(par); 331 vmw_fb_dirty_flush(par);
325}; 332};
326 333
327struct fb_deferred_io vmw_defio = { 334static struct fb_deferred_io vmw_defio = {
328 .delay = VMW_DIRTY_DELAY, 335 .delay = VMW_DIRTY_DELAY,
329 .deferred_io = vmw_deferred_io, 336 .deferred_io = vmw_deferred_io,
330}; 337};
@@ -358,33 +365,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
358 * Bring up code 365 * Bring up code
359 */ 366 */
360 367
361static struct fb_ops vmw_fb_ops = {
362 .owner = THIS_MODULE,
363 .fb_check_var = vmw_fb_check_var,
364 .fb_set_par = vmw_fb_set_par,
365 .fb_setcolreg = vmw_fb_setcolreg,
366 .fb_fillrect = vmw_fb_fillrect,
367 .fb_copyarea = vmw_fb_copyarea,
368 .fb_imageblit = vmw_fb_imageblit,
369 .fb_pan_display = vmw_fb_pan_display,
370 .fb_blank = vmw_fb_blank,
371};
372
373static int vmw_fb_create_bo(struct vmw_private *vmw_priv, 368static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out) 369 size_t size, struct vmw_dma_buffer **out)
375{ 370{
376 struct vmw_dma_buffer *vmw_bo; 371 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
378 struct ttm_placement ne_placement;
379 int ret; 372 int ret;
380 373
381 ne_placement.num_placement = 1;
382 ne_placement.placement = &ne_place;
383 ne_placement.num_busy_placement = 1;
384 ne_placement.busy_placement = &ne_place;
385
386 ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
387
388 (void) ttm_write_lock(&vmw_priv->reservation_sem, false); 374 (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
389 375
390 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); 376 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -394,31 +380,265 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
394 } 380 }
395 381
396 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, 382 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
397 &ne_placement, 383 &vmw_sys_placement,
398 false, 384 false,
399 &vmw_dmabuf_bo_free); 385 &vmw_dmabuf_bo_free);
400 if (unlikely(ret != 0)) 386 if (unlikely(ret != 0))
401 goto err_unlock; /* init frees the buffer on failure */ 387 goto err_unlock; /* init frees the buffer on failure */
402 388
403 *out = vmw_bo; 389 *out = vmw_bo;
404 390 ttm_write_unlock(&vmw_priv->reservation_sem);
405 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
406 391
407 return 0; 392 return 0;
408 393
409err_unlock: 394err_unlock:
410 ttm_write_unlock(&vmw_priv->fbdev_master.lock); 395 ttm_write_unlock(&vmw_priv->reservation_sem);
396 return ret;
397}
398
399static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
400 int *depth)
401{
402 switch (var->bits_per_pixel) {
403 case 32:
404 *depth = (var->transp.length > 0) ? 32 : 24;
405 break;
406 default:
407 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
408 return -EINVAL;
409 }
410
411 return 0;
412}
413
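
vmw_fb_compute_depth() above derives the DRM depth from the fbdev pixel format: only 32 bpp is accepted, and the transparency channel length decides between ARGB (depth 32) and XRGB (depth 24). A standalone sketch of the same rule (illustrative, error handling simplified):

/* Illustrative re-statement of the depth rule above. */
static int compute_depth(unsigned int bits_per_pixel,
			 unsigned int transp_length, int *depth)
{
	if (bits_per_pixel != 32)
		return -1; /* the driver rejects this with -EINVAL */

	*depth = (transp_length > 0) ? 32 : 24; /* alpha present -> 32 */
	return 0;
}
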
414static int vmw_fb_kms_detach(struct vmw_fb_par *par,
415 bool detach_bo,
416 bool unref_bo)
417{
418 struct drm_framebuffer *cur_fb = par->set_fb;
419 int ret;
420
421 /* Detach the KMS framebuffer from crtcs */
422 if (par->set_mode) {
423 struct drm_mode_set set;
424
425 set.crtc = par->crtc;
426 set.x = 0;
427 set.y = 0;
428 set.mode = NULL;
429 set.fb = NULL;
430 set.num_connectors = 1;
431 set.connectors = &par->con;
432 ret = drm_mode_set_config_internal(&set);
433 if (ret) {
434 DRM_ERROR("Could not unset a mode.\n");
435 return ret;
436 }
437 drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
438 par->set_mode = NULL;
439 }
440
441 if (cur_fb) {
442 drm_framebuffer_unreference(cur_fb);
443 par->set_fb = NULL;
444 }
445
446 if (par->vmw_bo && detach_bo) {
447 if (par->bo_ptr) {
448 ttm_bo_kunmap(&par->map);
449 par->bo_ptr = NULL;
450 }
451 if (unref_bo)
452 vmw_dmabuf_unreference(&par->vmw_bo);
453 else
454 vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
455 }
456
457 return 0;
458}
459
460static int vmw_fb_kms_framebuffer(struct fb_info *info)
461{
462 struct drm_mode_fb_cmd mode_cmd;
463 struct vmw_fb_par *par = info->par;
464 struct fb_var_screeninfo *var = &info->var;
465 struct drm_framebuffer *cur_fb;
466 struct vmw_framebuffer *vfb;
467 int ret = 0;
468 size_t new_bo_size;
469
470 ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
471 if (ret)
472 return ret;
473
474 mode_cmd.width = var->xres;
475 mode_cmd.height = var->yres;
476 mode_cmd.bpp = var->bits_per_pixel;
477 mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
478
479 cur_fb = par->set_fb;
480 if (cur_fb && cur_fb->width == mode_cmd.width &&
481 cur_fb->height == mode_cmd.height &&
482 cur_fb->bits_per_pixel == mode_cmd.bpp &&
483 cur_fb->depth == mode_cmd.depth &&
484 cur_fb->pitches[0] == mode_cmd.pitch)
485 return 0;
486
487 /* Need a new buffer object? */
488 new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
489 ret = vmw_fb_kms_detach(par,
490 par->bo_size < new_bo_size ||
491 par->bo_size > 2*new_bo_size,
492 true);
493 if (ret)
494 return ret;
495
496 if (!par->vmw_bo) {
497 ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
498 &par->vmw_bo);
499 if (ret) {
500 DRM_ERROR("Failed to create a buffer object for "
501 "fbdev.\n");
502 return ret;
503 }
504 par->bo_size = new_bo_size;
505 }
506
507 vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
508 true, &mode_cmd);
509 if (IS_ERR(vfb))
510 return PTR_ERR(vfb);
511
512 par->set_fb = &vfb->base;
513
514 if (!par->bo_ptr) {
515 /*
516 * Pin before mapping. Since we don't know in what placement
517 * to pin, call into KMS to do it for us.
518 */
519 ret = vfb->pin(vfb);
520 if (ret) {
521 DRM_ERROR("Could not pin the fbdev framebuffer.\n");
522 return ret;
523 }
524
525 ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
526 par->vmw_bo->base.num_pages, &par->map);
527 if (ret) {
528 vfb->unpin(vfb);
529 DRM_ERROR("Could not map the fbdev framebuffer.\n");
530 return ret;
531 }
532
533 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
534 }
535
536 return 0;
537}
538
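
Two details of vmw_fb_kms_framebuffer() above are worth spelling out: the pitch is the width times bytes per pixel with bpp rounded up to whole bytes, and the backing buffer is reallocated only when it is too small or more than twice the required size, which gives some hysteresis across mode changes. A standalone sketch of both calculations (illustrative):

#include <stdbool.h>
#include <stddef.h>

/* Pitch and size computation matching the mode_cmd setup above. */
static size_t fb_size_bytes(unsigned int width, unsigned int height,
			    unsigned int bpp)
{
	size_t pitch = ((bpp + 7) / 8) * (size_t)width; /* bytes per line */
	return pitch * height;
}

/* Reallocation policy matching the vmw_fb_kms_detach() call above:
 * keep the buffer while new_size <= bo_size <= 2 * new_size. */
static bool need_new_bo(size_t bo_size, size_t new_size)
{
	return bo_size < new_size || bo_size > 2 * new_size;
}
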
539static int vmw_fb_set_par(struct fb_info *info)
540{
541 struct vmw_fb_par *par = info->par;
542 struct vmw_private *vmw_priv = par->vmw_priv;
543 struct drm_mode_set set;
544 struct fb_var_screeninfo *var = &info->var;
545 struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
546 DRM_MODE_TYPE_DRIVER,
547 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
548 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
549 };
550 struct drm_display_mode *old_mode;
551 struct drm_display_mode *mode;
552 int ret;
553
554 old_mode = par->set_mode;
555 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
556 if (!mode) {
557 DRM_ERROR("Could not create new fb mode.\n");
558 return -ENOMEM;
559 }
560
561 mode->hdisplay = var->xres;
562 mode->vdisplay = var->yres;
563 vmw_guess_mode_timing(mode);
564
565 if (old_mode && drm_mode_equal(old_mode, mode)) {
566 drm_mode_destroy(vmw_priv->dev, mode);
567 mode = old_mode;
568 old_mode = NULL;
569 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
570 mode->hdisplay *
571 (var->bits_per_pixel + 7) / 8,
572 mode->vdisplay)) {
573 drm_mode_destroy(vmw_priv->dev, mode);
574 return -EINVAL;
575 }
576
577 mutex_lock(&par->bo_mutex);
578 drm_modeset_lock_all(vmw_priv->dev);
579 ret = vmw_fb_kms_framebuffer(info);
580 if (ret)
581 goto out_unlock;
582
583 par->fb_x = var->xoffset;
584 par->fb_y = var->yoffset;
585
586 set.crtc = par->crtc;
587 set.x = 0;
588 set.y = 0;
589 set.mode = mode;
590 set.fb = par->set_fb;
591 set.num_connectors = 1;
592 set.connectors = &par->con;
593
594 ret = drm_mode_set_config_internal(&set);
595 if (ret)
596 goto out_unlock;
597
598 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
599 par->set_fb->width, par->set_fb->height);
600
601 /* If something was already dirty, no new work will be
602 * scheduled, so schedule it now. */
603
604#if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED))
605 schedule_delayed_work(&par->def_par.deferred_work, 0);
606#else
607 schedule_delayed_work(&info->deferred_work, 0);
608#endif
609
610out_unlock:
611 if (old_mode)
612 drm_mode_destroy(vmw_priv->dev, old_mode);
613 par->set_mode = mode;
614
615 drm_modeset_unlock_all(vmw_priv->dev);
616 mutex_unlock(&par->bo_mutex);
617
411 return ret; 618 return ret;
412} 619}
413 620
621
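
The vmw_kms_validate_mode_vram() call in vmw_fb_set_par() above is passed the pitch (hdisplay times bytes per pixel) and the vdisplay; conceptually, a mode fits when pitch * height does not exceed the memory available for scanout. A simplified standalone sketch (the real helper also distinguishes VRAM from prim_bb_mem depending on the display unit, which is glossed over here):

#include <stdbool.h>
#include <stdint.h>

/* Simplified version of the scanout-memory fit check performed above. */
static bool mode_fits(uint64_t pitch, uint64_t height, uint64_t scanout_mem)
{
	return pitch * height <= scanout_mem;
}
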
622static struct fb_ops vmw_fb_ops = {
623 .owner = THIS_MODULE,
624 .fb_check_var = vmw_fb_check_var,
625 .fb_set_par = vmw_fb_set_par,
626 .fb_setcolreg = vmw_fb_setcolreg,
627 .fb_fillrect = vmw_fb_fillrect,
628 .fb_copyarea = vmw_fb_copyarea,
629 .fb_imageblit = vmw_fb_imageblit,
630 .fb_pan_display = vmw_fb_pan_display,
631 .fb_blank = vmw_fb_blank,
632};
633
414int vmw_fb_init(struct vmw_private *vmw_priv) 634int vmw_fb_init(struct vmw_private *vmw_priv)
415{ 635{
416 struct device *device = &vmw_priv->dev->pdev->dev; 636 struct device *device = &vmw_priv->dev->pdev->dev;
417 struct vmw_fb_par *par; 637 struct vmw_fb_par *par;
418 struct fb_info *info; 638 struct fb_info *info;
419 unsigned initial_width, initial_height;
420 unsigned fb_width, fb_height; 639 unsigned fb_width, fb_height;
421 unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; 640 unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
641 struct drm_display_mode *init_mode;
422 int ret; 642 int ret;
423 643
424 fb_bpp = 32; 644 fb_bpp = 32;
@@ -428,9 +648,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
428 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); 648 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
429 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); 649 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
430 650
431 initial_width = min(vmw_priv->initial_width, fb_width);
432 initial_height = min(vmw_priv->initial_height, fb_height);
433
434 fb_pitch = fb_width * fb_bpp / 8; 651 fb_pitch = fb_width * fb_bpp / 8;
435 fb_size = fb_pitch * fb_height; 652 fb_size = fb_pitch * fb_height;
436 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); 653 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +661,34 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
444 */ 661 */
445 vmw_priv->fb_info = info; 662 vmw_priv->fb_info = info;
446 par = info->par; 663 par = info->par;
664 memset(par, 0, sizeof(*par));
447 par->vmw_priv = vmw_priv; 665 par->vmw_priv = vmw_priv;
448 par->depth = fb_depth;
449 par->bpp = fb_bpp;
450 par->vmalloc = NULL; 666 par->vmalloc = NULL;
451 par->max_width = fb_width; 667 par->max_width = fb_width;
452 par->max_height = fb_height; 668 par->max_height = fb_height;
453 669
670 drm_modeset_lock_all(vmw_priv->dev);
671 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
672 par->max_height, &par->con,
673 &par->crtc, &init_mode);
674 if (ret) {
675 drm_modeset_unlock_all(vmw_priv->dev);
676 goto err_kms;
677 }
678
679 info->var.xres = init_mode->hdisplay;
680 info->var.yres = init_mode->vdisplay;
681 drm_modeset_unlock_all(vmw_priv->dev);
682
454 /* 683 /*
455 * Create buffers and alloc memory 684 * Create buffers and alloc memory
456 */ 685 */
457 par->vmalloc = vmalloc(fb_size); 686 par->vmalloc = vzalloc(fb_size);
458 if (unlikely(par->vmalloc == NULL)) { 687 if (unlikely(par->vmalloc == NULL)) {
459 ret = -ENOMEM; 688 ret = -ENOMEM;
460 goto err_free; 689 goto err_free;
461 } 690 }
462 691
463 ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
464 if (unlikely(ret != 0))
465 goto err_free;
466
467 ret = ttm_bo_kmap(&par->vmw_bo->base,
468 0,
469 par->vmw_bo->base.num_pages,
470 &par->map);
471 if (unlikely(ret != 0))
472 goto err_unref;
473 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
474 par->bo_size = fb_size;
475
476 /* 692 /*
477 * Fixed and var 693 * Fixed and var
478 */ 694 */
@@ -490,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
490 info->fix.smem_len = fb_size; 706 info->fix.smem_len = fb_size;
491 707
492 info->pseudo_palette = par->pseudo_palette; 708 info->pseudo_palette = par->pseudo_palette;
493 info->screen_base = par->vmalloc; 709 info->screen_base = (char __iomem *)par->vmalloc;
494 info->screen_size = fb_size; 710 info->screen_size = fb_size;
495 711
496 info->flags = FBINFO_DEFAULT; 712 info->flags = FBINFO_DEFAULT;
@@ -508,18 +724,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
508 724
509 info->var.xres_virtual = fb_width; 725 info->var.xres_virtual = fb_width;
510 info->var.yres_virtual = fb_height; 726 info->var.yres_virtual = fb_height;
511 info->var.bits_per_pixel = par->bpp; 727 info->var.bits_per_pixel = fb_bpp;
512 info->var.xoffset = 0; 728 info->var.xoffset = 0;
513 info->var.yoffset = 0; 729 info->var.yoffset = 0;
514 info->var.activate = FB_ACTIVATE_NOW; 730 info->var.activate = FB_ACTIVATE_NOW;
515 info->var.height = -1; 731 info->var.height = -1;
516 info->var.width = -1; 732 info->var.width = -1;
517 733
518 info->var.xres = initial_width;
519 info->var.yres = initial_height;
520
521 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 734 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
522
523 info->apertures = alloc_apertures(1); 735 info->apertures = alloc_apertures(1);
524 if (!info->apertures) { 736 if (!info->apertures) {
525 ret = -ENOMEM; 737 ret = -ENOMEM;
@@ -535,6 +747,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
535 par->dirty.y1 = par->dirty.y2 = 0; 747 par->dirty.y1 = par->dirty.y2 = 0;
536 par->dirty.active = true; 748 par->dirty.active = true;
537 spin_lock_init(&par->dirty.lock); 749 spin_lock_init(&par->dirty.lock);
750 mutex_init(&par->bo_mutex);
538 info->fbdefio = &vmw_defio; 751 info->fbdefio = &vmw_defio;
539 fb_deferred_io_init(info); 752 fb_deferred_io_init(info);
540 753
@@ -542,16 +755,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
542 if (unlikely(ret != 0)) 755 if (unlikely(ret != 0))
543 goto err_defio; 756 goto err_defio;
544 757
758 vmw_fb_set_par(info);
759
545 return 0; 760 return 0;
546 761
547err_defio: 762err_defio:
548 fb_deferred_io_cleanup(info); 763 fb_deferred_io_cleanup(info);
549err_aper: 764err_aper:
550 ttm_bo_kunmap(&par->map);
551err_unref:
552 ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
553err_free: 765err_free:
554 vfree(par->vmalloc); 766 vfree(par->vmalloc);
767err_kms:
555 framebuffer_release(info); 768 framebuffer_release(info);
556 vmw_priv->fb_info = NULL; 769 vmw_priv->fb_info = NULL;
557 770
@@ -562,22 +775,18 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
562{ 775{
563 struct fb_info *info; 776 struct fb_info *info;
564 struct vmw_fb_par *par; 777 struct vmw_fb_par *par;
565 struct ttm_buffer_object *bo;
566 778
567 if (!vmw_priv->fb_info) 779 if (!vmw_priv->fb_info)
568 return 0; 780 return 0;
569 781
570 info = vmw_priv->fb_info; 782 info = vmw_priv->fb_info;
571 par = info->par; 783 par = info->par;
572 bo = &par->vmw_bo->base;
573 par->vmw_bo = NULL;
574 784
575 /* ??? order */ 785 /* ??? order */
576 fb_deferred_io_cleanup(info); 786 fb_deferred_io_cleanup(info);
577 unregister_framebuffer(info); 787 unregister_framebuffer(info);
578 788
579 ttm_bo_kunmap(&par->map); 789 (void) vmw_fb_kms_detach(par, true, true);
580 ttm_bo_unref(&bo);
581 790
582 vfree(par->vmalloc); 791 vfree(par->vmalloc);
583 framebuffer_release(info); 792 framebuffer_release(info);
@@ -603,10 +812,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
603 812
604 flush_delayed_work(&info->deferred_work); 813 flush_delayed_work(&info->deferred_work);
605 814
606 par->bo_ptr = NULL; 815 mutex_lock(&par->bo_mutex);
607 ttm_bo_kunmap(&par->map); 816 (void) vmw_fb_kms_detach(par, true, false);
608 817 mutex_unlock(&par->bo_mutex);
609 vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
610 818
611 return 0; 819 return 0;
612} 820}
@@ -616,8 +824,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
616 struct fb_info *info; 824 struct fb_info *info;
617 struct vmw_fb_par *par; 825 struct vmw_fb_par *par;
618 unsigned long flags; 826 unsigned long flags;
619 bool dummy;
620 int ret;
621 827
622 if (!vmw_priv->fb_info) 828 if (!vmw_priv->fb_info)
623 return -EINVAL; 829 return -EINVAL;
@@ -625,38 +831,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
625 info = vmw_priv->fb_info; 831 info = vmw_priv->fb_info;
626 par = info->par; 832 par = info->par;
627 833
628 /* we are already active */ 834 vmw_fb_set_par(info);
629 if (par->bo_ptr != NULL)
630 return 0;
631
632 /* Make sure that all overlays are stoped when we take over */
633 vmw_overlay_stop_all(vmw_priv);
634
635 ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
636 if (unlikely(ret != 0)) {
637 DRM_ERROR("could not move buffer to start of VRAM\n");
638 goto err_no_buffer;
639 }
640
641 ret = ttm_bo_kmap(&par->vmw_bo->base,
642 0,
643 par->vmw_bo->base.num_pages,
644 &par->map);
645 BUG_ON(ret != 0);
646 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
647
648 spin_lock_irqsave(&par->dirty.lock, flags); 835 spin_lock_irqsave(&par->dirty.lock, flags);
649 par->dirty.active = true; 836 par->dirty.active = true;
650 spin_unlock_irqrestore(&par->dirty.lock, flags); 837 spin_unlock_irqrestore(&par->dirty.lock, flags);
651 838
652err_no_buffer:
653 vmw_fb_set_par(info);
654
655 vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
656
657 /* If there already was stuff dirty we wont
658 * schedule a new work, so lets do it now */
659 schedule_delayed_work(&info->deferred_work, 0);
660
661 return 0; 839 return 0;
662} 840}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..567ddede51d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
142 struct vmw_fence_manager *fman = fman_from_fence(fence); 142 struct vmw_fence_manager *fman = fman_from_fence(fence);
143 struct vmw_private *dev_priv = fman->dev_priv; 143 struct vmw_private *dev_priv = fman->dev_priv;
144 144
145 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 145 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
146 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 146 u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 147 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
148 return false; 148 return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
386 u32 passed_seqno) 386 u32 passed_seqno)
387{ 387{
388 u32 goal_seqno; 388 u32 goal_seqno;
389 __le32 __iomem *fifo_mem; 389 u32 __iomem *fifo_mem;
390 struct vmw_fence_obj *fence; 390 struct vmw_fence_obj *fence;
391 391
392 if (likely(!fman->seqno_valid)) 392 if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
430{ 430{
431 struct vmw_fence_manager *fman = fman_from_fence(fence); 431 struct vmw_fence_manager *fman = fman_from_fence(fence);
432 u32 goal_seqno; 432 u32 goal_seqno;
433 __le32 __iomem *fifo_mem; 433 u32 __iomem *fifo_mem;
434 434
435 if (fence_is_signaled_locked(&fence->base)) 435 if (fence_is_signaled_locked(&fence->base))
436 return false; 436 return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
453 struct list_head action_list; 453 struct list_head action_list;
454 bool needs_rerun; 454 bool needs_rerun;
455 uint32_t seqno, new_seqno; 455 uint32_t seqno, new_seqno;
456 __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt; 456 u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
457 457
458 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 458 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
459rerun: 459rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add39208..8be6c29f5eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/ttm/ttm_placement.h> 30#include <drm/ttm/ttm_placement.h>
31 31
32struct vmw_temp_set_context {
33 SVGA3dCmdHeader header;
34 SVGA3dCmdDXTempSetContext body;
35};
36
32bool vmw_fifo_have_3d(struct vmw_private *dev_priv) 37bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
33{ 38{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 39 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion; 40 uint32_t fifo_min, hwversion;
36 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 41 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
37 42
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
71 if (hwversion < SVGA3D_HWVERSION_WS8_B1) 76 if (hwversion < SVGA3D_HWVERSION_WS8_B1)
72 return false; 77 return false;
73 78
74 /* Non-Screen Object path does not support surfaces */ 79 /* Legacy Display Unit does not support surfaces */
75 if (!dev_priv->sou_priv) 80 if (dev_priv->active_display_unit == vmw_du_legacy)
76 return false; 81 return false;
77 82
78 return true; 83 return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
80 85
81bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) 86bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
82{ 87{
83 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 88 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
84 uint32_t caps; 89 uint32_t caps;
85 90
86 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 91 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
95 100
96int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 101int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
97{ 102{
98 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 103 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
99 uint32_t max; 104 uint32_t max;
100 uint32_t min; 105 uint32_t min;
101 uint32_t dummy;
102 106
107 fifo->dx = false;
103 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; 108 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
104 fifo->static_buffer = vmalloc(fifo->static_buffer_size); 109 fifo->static_buffer = vmalloc(fifo->static_buffer_size);
105 if (unlikely(fifo->static_buffer == NULL)) 110 if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
112 mutex_init(&fifo->fifo_mutex); 117 mutex_init(&fifo->fifo_mutex);
113 init_rwsem(&fifo->rwsem); 118 init_rwsem(&fifo->rwsem);
114 119
115 /*
116 * Allow mapping the first page read-only to user-space.
117 */
118
119 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); 120 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
120 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 121 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
121 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 122 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
123 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 124 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
124 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 125 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
125 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 126 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
126 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 127
128 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
129 SVGA_REG_ENABLE_HIDE);
130 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
127 131
128 min = 4; 132 min = 4;
129 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) 133 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
155 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 159 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
156 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); 160 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
157 vmw_marker_queue_init(&fifo->marker_queue); 161 vmw_marker_queue_init(&fifo->marker_queue);
158 return vmw_fifo_send_fence(dev_priv, &dummy); 162
163 return 0;
159} 164}
160 165
161void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 166void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
162{ 167{
163 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 168 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
164 static DEFINE_SPINLOCK(ping_lock); 169 static DEFINE_SPINLOCK(ping_lock);
165 unsigned long irq_flags; 170 unsigned long irq_flags;
166 171
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
178 183
179void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 184void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
180{ 185{
181 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 186 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
182 187
183 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 188 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
184 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 189 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
208 213
209static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) 214static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
210{ 215{
211 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 216 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
212 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 217 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
213 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 218 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
214 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 219 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
312 * Returns: 317 * Returns:
313 * Pointer to the fifo, or null on error (possible hardware hang). 318 * Pointer to the fifo, or null on error (possible hardware hang).
314 */ 319 */
315void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) 320static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
321 uint32_t bytes)
316{ 322{
317 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 323 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
318 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 324 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
319 uint32_t max; 325 uint32_t max;
320 uint32_t min; 326 uint32_t min;
321 uint32_t next_cmd; 327 uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
372 if (reserveable) 378 if (reserveable)
373 iowrite32(bytes, fifo_mem + 379 iowrite32(bytes, fifo_mem +
374 SVGA_FIFO_RESERVED); 380 SVGA_FIFO_RESERVED);
375 return fifo_mem + (next_cmd >> 2); 381 return (void __force *) (fifo_mem +
382 (next_cmd >> 2));
376 } else { 383 } else {
377 need_bounce = true; 384 need_bounce = true;
378 } 385 }
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
391out_err: 398out_err:
392 fifo_state->reserved_size = 0; 399 fifo_state->reserved_size = 0;
393 mutex_unlock(&fifo_state->fifo_mutex); 400 mutex_unlock(&fifo_state->fifo_mutex);
401
394 return NULL; 402 return NULL;
395} 403}
396 404
405void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
406 int ctx_id)
407{
408 void *ret;
409
410 if (dev_priv->cman)
411 ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
412 ctx_id, false, NULL);
413 else if (ctx_id == SVGA3D_INVALID_ID)
414 ret = vmw_local_fifo_reserve(dev_priv, bytes);
415 else {
416 WARN(true, "Command buffer has not been allocated.\n");
417 ret = NULL;
418 }
419 if (IS_ERR_OR_NULL(ret)) {
420 DRM_ERROR("Fifo reserve failure of %u bytes.\n",
421 (unsigned) bytes);
422 dump_stack();
423 return NULL;
424 }
425
426 return ret;
427}
428
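
vmw_fifo_reserve_dx() above chooses the submission backend: the command buffer manager when it exists (it can carry a DX context id), otherwise the legacy FIFO, which only accepts context-less reservations. A standalone sketch of that dispatch with stubbed backends (illustrative; INVALID_ID stands in for SVGA3D_INVALID_ID):

#include <stdlib.h>

#define INVALID_ID (-1)

/* Stubs standing in for vmw_cmdbuf_reserve() / vmw_local_fifo_reserve(). */
static void *cmdbuf_reserve(size_t bytes, int ctx_id)
{
	(void)ctx_id;
	return malloc(bytes);
}

static void *local_fifo_reserve(size_t bytes)
{
	return malloc(bytes);
}

static void *reserve(int have_cman, size_t bytes, int ctx_id)
{
	if (have_cman)
		return cmdbuf_reserve(bytes, ctx_id); /* handles DX contexts */
	if (ctx_id == INVALID_ID)
		return local_fifo_reserve(bytes);     /* legacy FIFO path */
	return NULL; /* DX context without a command buffer manager: error */
}
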
397static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, 429static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
398 __le32 __iomem *fifo_mem, 430 u32 __iomem *fifo_mem,
399 uint32_t next_cmd, 431 uint32_t next_cmd,
400 uint32_t max, uint32_t min, uint32_t bytes) 432 uint32_t max, uint32_t min, uint32_t bytes)
401{ 433{
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
417} 449}
418 450
419static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, 451static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
420 __le32 __iomem *fifo_mem, 452 u32 __iomem *fifo_mem,
421 uint32_t next_cmd, 453 uint32_t next_cmd,
422 uint32_t max, uint32_t min, uint32_t bytes) 454 uint32_t max, uint32_t min, uint32_t bytes)
423{ 455{
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
436 } 468 }
437} 469}
438 470
439void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) 471static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
440{ 472{
441 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 473 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
442 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 474 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
443 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 475 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
444 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); 476 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
445 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 477 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
446 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 478 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
447 479
480 if (fifo_state->dx)
481 bytes += sizeof(struct vmw_temp_set_context);
482
483 fifo_state->dx = false;
448 BUG_ON((bytes & 3) != 0); 484 BUG_ON((bytes & 3) != 0);
449 BUG_ON(bytes > fifo_state->reserved_size); 485 BUG_ON(bytes > fifo_state->reserved_size);
450 486
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
482 mutex_unlock(&fifo_state->fifo_mutex); 518 mutex_unlock(&fifo_state->fifo_mutex);
483} 519}
484 520
521void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
522{
523 if (dev_priv->cman)
524 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
525 else
526 vmw_local_fifo_commit(dev_priv, bytes);
527}
528
529
530/**
531 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
532 *
533 * @dev_priv: Pointer to device private structure.
534 * @bytes: Number of bytes to commit.
535 */
536void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
537{
538 if (dev_priv->cman)
539 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
540 else
541 vmw_local_fifo_commit(dev_priv, bytes);
542}
543
544/**
545 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
546 * starts.
547 *
548 * @dev_priv: Pointer to device private structure.
549 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
550 */
551int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
552{
553 might_sleep();
554
555 if (dev_priv->cman)
556 return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
557 else
558 return 0;
559}
560
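
Together, vmw_fifo_commit(), vmw_fifo_commit_flush() and vmw_fifo_flush() complete the backend abstraction started by the reserve helpers: commit finishes a reservation, commit_flush additionally pushes buffered command-buffer contents to the device, and flush forces submission without a pending reservation. A hedged usage sketch as it would appear inside the driver (the command id and payload are placeholders, not a real SVGA command):

/* Hypothetical caller showing the reserve/commit pairing (placeholder
 * command layout; not a real device command). */
static int emit_example(struct vmw_private *dev_priv, u32 value)
{
	u32 *cmd = vmw_fifo_reserve(dev_priv, 2 * sizeof(u32));

	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd[0] = 0x1234;	/* placeholder command id */
	cmd[1] = value;		/* placeholder payload */
	vmw_fifo_commit_flush(dev_priv, 2 * sizeof(u32));

	return 0;
}
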
485int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) 561int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
486{ 562{
487 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 563 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
488 struct svga_fifo_cmd_fence *cmd_fence; 564 struct svga_fifo_cmd_fence *cmd_fence;
489 void *fm; 565 u32 *fm;
490 int ret = 0; 566 int ret = 0;
491 uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence); 567 uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
492 568
493 fm = vmw_fifo_reserve(dev_priv, bytes); 569 fm = vmw_fifo_reserve(dev_priv, bytes);
494 if (unlikely(fm == NULL)) { 570 if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
514 return 0; 590 return 0;
515 } 591 }
516 592
517 *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE); 593 *fm++ = SVGA_CMD_FENCE;
518 cmd_fence = (struct svga_fifo_cmd_fence *) 594 cmd_fence = (struct svga_fifo_cmd_fence *) fm;
519 ((unsigned long)fm + sizeof(__le32)); 595 cmd_fence->fence = *seqno;
520 596 vmw_fifo_commit_flush(dev_priv, bytes);
521 iowrite32(*seqno, &cmd_fence->fence);
522 vmw_fifo_commit(dev_priv, bytes);
523 (void) vmw_marker_push(&fifo_state->marker_queue, *seqno); 597 (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
524 vmw_update_seqno(dev_priv, fifo_state); 598 vmw_update_seqno(dev_priv, fifo_state);
525 599
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
545 * without writing to the query result structure. 619 * without writing to the query result structure.
546 */ 620 */
547 621
548 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 622 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
549 struct { 623 struct {
550 SVGA3dCmdHeader header; 624 SVGA3dCmdHeader header;
551 SVGA3dCmdWaitForQuery body; 625 SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
594 * without writing to the query result structure. 668 * without writing to the query result structure.
595 */ 669 */
596 670
597 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 671 struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
598 struct { 672 struct {
599 SVGA3dCmdHeader header; 673 SVGA3dCmdHeader header;
600 SVGA3dCmdWaitForGBQuery body; 674 SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
647 721
648 return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); 722 return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
649} 723}
724
725void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
726{
727 return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
728}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d803199f..66ffa1d4759c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23123c..0a970afed93b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include <drm/vmwgfx_drm.h> 29#include <drm/vmwgfx_drm.h>
30#include "vmwgfx_kms.h" 30#include "vmwgfx_kms.h"
31#include "device_include/svga3d_caps.h"
31 32
32struct svga_3d_compat_cap { 33struct svga_3d_compat_cap {
33 SVGA3dCapsRecordHeader header; 34 SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
63 break; 64 break;
64 case DRM_VMW_PARAM_FIFO_HW_VERSION: 65 case DRM_VMW_PARAM_FIFO_HW_VERSION:
65 { 66 {
66 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 67 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
67 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 68 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
68 69
69 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { 70 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
105 case DRM_VMW_PARAM_MAX_MOB_SIZE: 106 case DRM_VMW_PARAM_MAX_MOB_SIZE:
106 param->value = dev_priv->max_mob_size; 107 param->value = dev_priv->max_mob_size;
107 break; 108 break;
109 case DRM_VMW_PARAM_SCREEN_TARGET:
110 param->value =
111 (dev_priv->active_display_unit == vmw_du_screen_target);
112 break;
113 case DRM_VMW_PARAM_DX:
114 param->value = dev_priv->has_dx;
115 break;
108 default: 116 default:
109 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
110 param->param); 118 param->param);
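
The two new cases above give userspace a way to probe for screen target and DX support before relying on either. A hedged userspace sketch using libdrm (assumes the updated uapi header from this series, which defines DRM_VMW_PARAM_SCREEN_TARGET and DRM_VMW_PARAM_DX; header paths may vary):

#include <stdint.h>
#include <xf86drm.h>      /* libdrm */
#include "vmwgfx_drm.h"   /* uapi header from this series */

/* Query a device parameter through the existing DRM_VMW_GET_PARAM ioctl. */
static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
{
	struct drm_vmw_getparam_arg arg = { .param = param };
	int ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
				      &arg, sizeof(arg));

	if (ret == 0)
		*value = arg.value;
	return ret;
}
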
@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
154 (struct drm_vmw_get_3d_cap_arg *) data; 162 (struct drm_vmw_get_3d_cap_arg *) data;
155 struct vmw_private *dev_priv = vmw_priv(dev); 163 struct vmw_private *dev_priv = vmw_priv(dev);
156 uint32_t size; 164 uint32_t size;
157 __le32 __iomem *fifo_mem; 165 u32 __iomem *fifo_mem;
158 void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); 166 void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
159 void *bounce; 167 void *bounce;
160 int ret; 168 int ret;
@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
235 int ret; 243 int ret;
236 244
237 num_clips = arg->num_clips; 245 num_clips = arg->num_clips;
238 clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; 246 clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
239 247
240 if (unlikely(num_clips == 0)) 248 if (unlikely(num_clips == 0))
241 return 0; 249 return 0;
@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
318 int ret; 326 int ret;
319 327
320 num_clips = arg->num_clips; 328 num_clips = arg->num_clips;
321 clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; 329 clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
322 330
323 if (unlikely(num_clips == 0)) 331 if (unlikely(num_clips == 0))
324 return 0; 332 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827ee499..9498a5e33c12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
56 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) 56 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
57 wake_up_all(&dev_priv->fifo_queue); 57 wake_up_all(&dev_priv->fifo_queue);
58 58
59 if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
60 SVGA_IRQFLAG_ERROR))
61 vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
59 62
60 return IRQ_HANDLED; 63 return IRQ_HANDLED;
61} 64}
@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
69void vmw_update_seqno(struct vmw_private *dev_priv, 72void vmw_update_seqno(struct vmw_private *dev_priv,
70 struct vmw_fifo_state *fifo_state) 73 struct vmw_fifo_state *fifo_state)
71{ 74{
72 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 75 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
73 uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); 76 uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
74 77
75 if (dev_priv->last_read_seqno != seqno) { 78 if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
131 * Block command submission while waiting for idle. 134 * Block command submission while waiting for idle.
132 */ 135 */
133 136
134 if (fifo_idle) 137 if (fifo_idle) {
135 down_read(&fifo_state->rwsem); 138 down_read(&fifo_state->rwsem);
139 if (dev_priv->cman) {
140 ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
141 10*HZ);
142 if (ret)
143 goto out_err;
144 }
145 }
146
136 signal_seq = atomic_read(&dev_priv->marker_seq); 147 signal_seq = atomic_read(&dev_priv->marker_seq);
137 ret = 0; 148 ret = 0;
138 149
@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
167 } 178 }
168 finish_wait(&dev_priv->fence_queue, &__wait); 179 finish_wait(&dev_priv->fence_queue, &__wait);
169 if (ret == 0 && fifo_idle) { 180 if (ret == 0 && fifo_idle) {
170 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 181 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
171 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); 182 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
172 } 183 }
173 wake_up_all(&dev_priv->fence_queue); 184 wake_up_all(&dev_priv->fence_queue);
185out_err:
174 if (fifo_idle) 186 if (fifo_idle)
175 up_read(&fifo_state->rwsem); 187 up_read(&fifo_state->rwsem);
176 188
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
315 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 327 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
316 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 328 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
317} 329}
330
331void vmw_generic_waiter_add(struct vmw_private *dev_priv,
332 u32 flag, int *waiter_count)
333{
334 unsigned long irq_flags;
335
336 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
337 if ((*waiter_count)++ == 0) {
338 outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
339 dev_priv->irq_mask |= flag;
340 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
341 }
342 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
343}
344
345void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
346 u32 flag, int *waiter_count)
347{
348 unsigned long irq_flags;
349
350 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
351 if (--(*waiter_count) == 0) {
352 dev_priv->irq_mask &= ~flag;
353 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
354 }
355 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
356}
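
vmw_generic_waiter_add()/remove() above implement a counted enable: the interrupt is unmasked when the first waiter registers and masked again when the last one leaves, so nested callers stay balanced. A standalone sketch of the pattern (illustrative; a pthread mutex stands in for the irqsave spinlock, and the device register writes are omitted):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irq_mask;	/* stands in for dev_priv->irq_mask */

static void waiter_add(uint32_t flag, int *waiter_count)
{
	pthread_mutex_lock(&lock);
	if ((*waiter_count)++ == 0)
		irq_mask |= flag;	/* first waiter: unmask the interrupt */
	pthread_mutex_unlock(&lock);
}

static void waiter_remove(uint32_t flag, int *waiter_count)
{
	pthread_mutex_lock(&lock);
	if (--(*waiter_count) == 0)
		irq_mask &= ~flag;	/* last waiter: mask it again */
	pthread_mutex_unlock(&lock);
}
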
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2adc11bc0920..61fb7f3de311 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
31/* Might need a hrtimer here? */ 31/* Might need a hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33 33
34 34void vmw_du_cleanup(struct vmw_display_unit *du)
35struct vmw_clip_rect {
36 int x1, x2, y1, y2;
37};
38
39/**
40 * Clip @num_rects number of @rects against @clip storing the
41 * results in @out_rects and the number of passed rects in @out_num.
42 */
43static void vmw_clip_cliprects(struct drm_clip_rect *rects,
44 int num_rects,
45 struct vmw_clip_rect clip,
46 SVGASignedRect *out_rects,
47 int *out_num)
48{
49 int i, k;
50
51 for (i = 0, k = 0; i < num_rects; i++) {
52 int x1 = max_t(int, clip.x1, rects[i].x1);
53 int y1 = max_t(int, clip.y1, rects[i].y1);
54 int x2 = min_t(int, clip.x2, rects[i].x2);
55 int y2 = min_t(int, clip.y2, rects[i].y2);
56
57 if (x1 >= x2)
58 continue;
59 if (y1 >= y2)
60 continue;
61
62 out_rects[k].left = x1;
63 out_rects[k].top = y1;
64 out_rects[k].right = x2;
65 out_rects[k].bottom = y2;
66 k++;
67 }
68
69 *out_num = k;
70}
71
72void vmw_display_unit_cleanup(struct vmw_display_unit *du)
73{ 35{
74 if (du->cursor_surface) 36 if (du->cursor_surface)
75 vmw_surface_unreference(&du->cursor_surface); 37 vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
109 71
110 memcpy(&cmd[1], image, image_size); 72 memcpy(&cmd[1], image, image_size);
111 73
112 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); 74 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
113 cmd->cursor.id = cpu_to_le32(0); 75 cmd->cursor.id = 0;
114 cmd->cursor.width = cpu_to_le32(width); 76 cmd->cursor.width = width;
115 cmd->cursor.height = cpu_to_le32(height); 77 cmd->cursor.height = height;
116 cmd->cursor.hotspotX = cpu_to_le32(hotspotX); 78 cmd->cursor.hotspotX = hotspotX;
117 cmd->cursor.hotspotY = cpu_to_le32(hotspotY); 79 cmd->cursor.hotspotY = hotspotY;
118 80
119 vmw_fifo_commit(dev_priv, cmd_size); 81 vmw_fifo_commit(dev_priv, cmd_size);
120 82
@@ -161,7 +123,7 @@ err_unreserve:
161void vmw_cursor_update_position(struct vmw_private *dev_priv, 123void vmw_cursor_update_position(struct vmw_private *dev_priv,
162 bool show, int x, int y) 124 bool show, int x, int y)
163{ 125{
164 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 126 u32 __iomem *fifo_mem = dev_priv->mmio_virt;
165 uint32_t count; 127 uint32_t count;
166 128
167 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); 129 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
367 329
368 srf->snooper.age++; 330 srf->snooper.age++;
369 331
370 /* we can't call this function from this function since execbuf has
371 * reserved fifo space.
372 *
373 * if (srf->snooper.crtc)
374 * vmw_ldu_crtc_cursor_update_image(dev_priv,
375 * srf->snooper.image, 64, 64,
376 * du->hotspot_x, du->hotspot_y);
377 */
378
379 ttm_bo_kunmap(&map); 332 ttm_bo_kunmap(&map);
380err_unreserve: 333err_unreserve:
381 ttm_bo_unreserve(bo); 334 ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
412 * Surface framebuffer code 365 * Surface framebuffer code
413 */ 366 */
414 367
415#define vmw_framebuffer_to_vfbs(x) \
416 container_of(x, struct vmw_framebuffer_surface, base.base)
417
418struct vmw_framebuffer_surface {
419 struct vmw_framebuffer base;
420 struct vmw_surface *surface;
421 struct vmw_dma_buffer *buffer;
422 struct list_head head;
423 struct drm_master *master;
424};
425
426static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 368static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
427{ 369{
428 struct vmw_framebuffer_surface *vfbs = 370 struct vmw_framebuffer_surface *vfbs =
429 vmw_framebuffer_to_vfbs(framebuffer); 371 vmw_framebuffer_to_vfbs(framebuffer);
430 struct vmw_master *vmaster = vmw_master(vfbs->master);
431
432 372
433 mutex_lock(&vmaster->fb_surf_mutex);
434 list_del(&vfbs->head);
435 mutex_unlock(&vmaster->fb_surf_mutex);
436
437 drm_master_put(&vfbs->master);
438 drm_framebuffer_cleanup(framebuffer); 373 drm_framebuffer_cleanup(framebuffer);
439 vmw_surface_unreference(&vfbs->surface); 374 vmw_surface_unreference(&vfbs->surface);
440 ttm_base_object_unref(&vfbs->base.user_obj); 375 if (vfbs->base.user_obj)
376 ttm_base_object_unref(&vfbs->base.user_obj);
441 377
442 kfree(vfbs); 378 kfree(vfbs);
443} 379}
444 380
445static int do_surface_dirty_sou(struct vmw_private *dev_priv,
446 struct drm_file *file_priv,
447 struct vmw_framebuffer *framebuffer,
448 unsigned flags, unsigned color,
449 struct drm_clip_rect *clips,
450 unsigned num_clips, int inc,
451 struct vmw_fence_obj **out_fence)
452{
453 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
454 struct drm_clip_rect *clips_ptr;
455 struct drm_clip_rect *tmp;
456 struct drm_crtc *crtc;
457 size_t fifo_size;
458 int i, num_units;
459 int ret = 0; /* silence warning */
460 int left, right, top, bottom;
461
462 struct {
463 SVGA3dCmdHeader header;
464 SVGA3dCmdBlitSurfaceToScreen body;
465 } *cmd;
466 SVGASignedRect *blits;
467
468 num_units = 0;
469 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
470 head) {
471 if (crtc->primary->fb != &framebuffer->base)
472 continue;
473 units[num_units++] = vmw_crtc_to_du(crtc);
474 }
475
476 BUG_ON(!clips || !num_clips);
477
478 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
479 if (unlikely(tmp == NULL)) {
480 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
481 return -ENOMEM;
482 }
483
484 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
485 cmd = kzalloc(fifo_size, GFP_KERNEL);
486 if (unlikely(cmd == NULL)) {
487 DRM_ERROR("Temporary fifo memory alloc failed.\n");
488 ret = -ENOMEM;
489 goto out_free_tmp;
490 }
491
492 /* setup blits pointer */
493 blits = (SVGASignedRect *)&cmd[1];
494
495 /* initial clip region */
496 left = clips->x1;
497 right = clips->x2;
498 top = clips->y1;
499 bottom = clips->y2;
500
501 /* skip the first clip rect */
502 for (i = 1, clips_ptr = clips + inc;
503 i < num_clips; i++, clips_ptr += inc) {
504 left = min_t(int, left, (int)clips_ptr->x1);
505 right = max_t(int, right, (int)clips_ptr->x2);
506 top = min_t(int, top, (int)clips_ptr->y1);
507 bottom = max_t(int, bottom, (int)clips_ptr->y2);
508 }
509
510 /* only need to do this once */
511 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
512 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
513
514 cmd->body.srcRect.left = left;
515 cmd->body.srcRect.right = right;
516 cmd->body.srcRect.top = top;
517 cmd->body.srcRect.bottom = bottom;
518
519 clips_ptr = clips;
520 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
521 tmp[i].x1 = clips_ptr->x1 - left;
522 tmp[i].x2 = clips_ptr->x2 - left;
523 tmp[i].y1 = clips_ptr->y1 - top;
524 tmp[i].y2 = clips_ptr->y2 - top;
525 }
526
527 /* do per unit writing, reuse fifo for each */
528 for (i = 0; i < num_units; i++) {
529 struct vmw_display_unit *unit = units[i];
530 struct vmw_clip_rect clip;
531 int num;
532
533 clip.x1 = left - unit->crtc.x;
534 clip.y1 = top - unit->crtc.y;
535 clip.x2 = right - unit->crtc.x;
536 clip.y2 = bottom - unit->crtc.y;
537
538 /* skip any crtcs that misses the clip region */
539 if (clip.x1 >= unit->crtc.mode.hdisplay ||
540 clip.y1 >= unit->crtc.mode.vdisplay ||
541 clip.x2 <= 0 || clip.y2 <= 0)
542 continue;
543
544 /*
545 * In order for the clip rects to be correctly scaled
546 * the src and dest rects needs to be the same size.
547 */
548 cmd->body.destRect.left = clip.x1;
549 cmd->body.destRect.right = clip.x2;
550 cmd->body.destRect.top = clip.y1;
551 cmd->body.destRect.bottom = clip.y2;
552
553 /* create a clip rect of the crtc in dest coords */
554 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
555 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
556 clip.x1 = 0 - clip.x1;
557 clip.y1 = 0 - clip.y1;
558
559 /* need to reset sid as it is changed by execbuf */
560 cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
561 cmd->body.destScreenId = unit->unit;
562
563 /* clip and write blits to cmd stream */
564 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
565
566 /* if no cliprects hit skip this */
567 if (num == 0)
568 continue;
569
570 /* only return the last fence */
571 if (out_fence && *out_fence)
572 vmw_fence_obj_unreference(out_fence);
573
574 /* recalculate package length */
575 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
576 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
577 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
578 fifo_size, 0, NULL, out_fence);
579
580 if (unlikely(ret != 0))
581 break;
582 }
583
584
585 kfree(cmd);
586out_free_tmp:
587 kfree(tmp);
588
589 return ret;
590}
591
592static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 381static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
593 struct drm_file *file_priv, 382 struct drm_file *file_priv,
594 unsigned flags, unsigned color, 383 unsigned flags, unsigned color,
@@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
601 struct drm_clip_rect norect; 390 struct drm_clip_rect norect;
602 int ret, inc = 1; 391 int ret, inc = 1;
603 392
604 if (unlikely(vfbs->master != file_priv->master)) 393 /* Legacy Display Unit does not support 3D */
605 return -EINVAL; 394 if (dev_priv->active_display_unit == vmw_du_legacy)
606
607 /* Require ScreenObject support for 3D */
608 if (!dev_priv->sou_priv)
609 return -EINVAL; 395 return -EINVAL;
610 396
611 drm_modeset_lock_all(dev_priv->dev); 397 drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
627 inc = 2; /* skip source rects */ 413 inc = 2; /* skip source rects */
628 } 414 }
629 415
630 ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, 416 if (dev_priv->active_display_unit == vmw_du_screen_object)
631 flags, color, 417 ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
632 clips, num_clips, inc, NULL); 418 clips, NULL, NULL, 0, 0,
419 num_clips, inc, NULL);
420 else
421 ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
422 clips, NULL, NULL, 0, 0,
423 num_clips, inc, NULL);
633 424
425 vmw_fifo_flush(dev_priv, false);
634 ttm_read_unlock(&dev_priv->reservation_sem); 426 ttm_read_unlock(&dev_priv->reservation_sem);
635 427
636 drm_modeset_unlock_all(dev_priv->dev); 428 drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
638 return 0; 430 return 0;
639} 431}
640 432
433/**
434 * vmw_kms_readback - Perform a readback from the screen system to
435 * a dma-buffer backed framebuffer.
436 *
437 * @dev_priv: Pointer to the device private structure.
438 * @file_priv: Pointer to a struct drm_file identifying the caller.
439 * Must be set to NULL if @user_fence_rep is NULL.
440 * @vfb: Pointer to the dma-buffer backed framebuffer.
441 * @user_fence_rep: User-space provided structure for fence information.
442 * Must be set to non-NULL if @file_priv is non-NULL.
443 * @vclips: Array of clip rects.
444 * @num_clips: Number of clip rects in @vclips.
445 *
446 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
447 * interrupted.
448 */
449int vmw_kms_readback(struct vmw_private *dev_priv,
450 struct drm_file *file_priv,
451 struct vmw_framebuffer *vfb,
452 struct drm_vmw_fence_rep __user *user_fence_rep,
453 struct drm_vmw_rect *vclips,
454 uint32_t num_clips)
455{
456 switch (dev_priv->active_display_unit) {
457 case vmw_du_screen_object:
458 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
459 user_fence_rep, vclips, num_clips);
460 case vmw_du_screen_target:
461 return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
462 user_fence_rep, NULL, vclips, num_clips,
463 1, false, true);
464 default:
465 WARN_ONCE(true,
466 "Readback called with invalid display system.\n");
467 }
468
469 return -ENOSYS;
470}
471
472
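
The kernel-doc for vmw_kms_readback() above ties @file_priv and @user_fence_rep together: both must be NULL or both non-NULL. A tiny illustrative check of that invariant (not driver code):

#include <stdbool.h>
#include <stddef.h>

/* file_priv and user_fence_rep must be NULL or non-NULL together. */
static bool readback_args_valid(const void *file_priv,
				const void *user_fence_rep)
{
	return (file_priv == NULL) == (user_fence_rep == NULL);
}
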
641static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 473static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
642 .destroy = vmw_framebuffer_surface_destroy, 474 .destroy = vmw_framebuffer_surface_destroy,
643 .dirty = vmw_framebuffer_surface_dirty, 475 .dirty = vmw_framebuffer_surface_dirty,
644}; 476};
645 477
646static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 478static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
647 struct drm_file *file_priv,
648 struct vmw_surface *surface, 479 struct vmw_surface *surface,
649 struct vmw_framebuffer **out, 480 struct vmw_framebuffer **out,
650 const struct drm_mode_fb_cmd 481 const struct drm_mode_fb_cmd
651 *mode_cmd) 482 *mode_cmd,
483 bool is_dmabuf_proxy)
652 484
653{ 485{
654 struct drm_device *dev = dev_priv->dev; 486 struct drm_device *dev = dev_priv->dev;
655 struct vmw_framebuffer_surface *vfbs; 487 struct vmw_framebuffer_surface *vfbs;
656 enum SVGA3dSurfaceFormat format; 488 enum SVGA3dSurfaceFormat format;
657 struct vmw_master *vmaster = vmw_master(file_priv->master);
658 int ret; 489 int ret;
659 490
660 /* 3D is only supported on HWv8 hosts which supports screen objects */ 491 /* 3D is only supported on HWv8 and newer hosts */
661 if (!dev_priv->sou_priv) 492 if (dev_priv->active_display_unit == vmw_du_legacy)
662 return -ENOSYS; 493 return -ENOSYS;
663 494
664 /* 495 /*
@@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
692 case 15: 523 case 15:
693 format = SVGA3D_A1R5G5B5; 524 format = SVGA3D_A1R5G5B5;
694 break; 525 break;
695 case 8:
696 format = SVGA3D_LUMINANCE8;
697 break;
698 default: 526 default:
699 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); 527 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
700 return -EINVAL; 528 return -EINVAL;
701 } 529 }
702 530
703 if (unlikely(format != surface->format)) { 531 /*
532 * For DX, surface format validation is done when surface->scanout
533 * is set.
534 */
535 if (!dev_priv->has_dx && format != surface->format) {
704 DRM_ERROR("Invalid surface format for requested mode.\n"); 536 DRM_ERROR("Invalid surface format for requested mode.\n");
705 return -EINVAL; 537 return -EINVAL;
706 } 538 }
@@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
711 goto out_err1; 543 goto out_err1;
712 } 544 }
713 545
714 if (!vmw_surface_reference(surface)) {
715 DRM_ERROR("failed to reference surface %p\n", surface);
716 ret = -EINVAL;
717 goto out_err2;
718 }
719
720 /* XXX get the first 3 from the surface info */ 546 /* XXX get the first 3 from the surface info */
721 vfbs->base.base.bits_per_pixel = mode_cmd->bpp; 547 vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
722 vfbs->base.base.pitches[0] = mode_cmd->pitch; 548 vfbs->base.base.pitches[0] = mode_cmd->pitch;
723 vfbs->base.base.depth = mode_cmd->depth; 549 vfbs->base.base.depth = mode_cmd->depth;
724 vfbs->base.base.width = mode_cmd->width; 550 vfbs->base.base.width = mode_cmd->width;
725 vfbs->base.base.height = mode_cmd->height; 551 vfbs->base.base.height = mode_cmd->height;
726 vfbs->surface = surface; 552 vfbs->surface = vmw_surface_reference(surface);
727 vfbs->base.user_handle = mode_cmd->handle; 553 vfbs->base.user_handle = mode_cmd->handle;
728 vfbs->master = drm_master_get(file_priv->master); 554 vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
729
730 mutex_lock(&vmaster->fb_surf_mutex);
731 list_add_tail(&vfbs->head, &vmaster->fb_surf);
732 mutex_unlock(&vmaster->fb_surf_mutex);
733 555
734 *out = &vfbs->base; 556 *out = &vfbs->base;
735 557
736 ret = drm_framebuffer_init(dev, &vfbs->base.base, 558 ret = drm_framebuffer_init(dev, &vfbs->base.base,
737 &vmw_framebuffer_surface_funcs); 559 &vmw_framebuffer_surface_funcs);
738 if (ret) 560 if (ret)
739 goto out_err3; 561 goto out_err2;
740 562
741 return 0; 563 return 0;
742 564
743out_err3:
744 vmw_surface_unreference(&surface);
745out_err2: 565out_err2:
566 vmw_surface_unreference(&surface);
746 kfree(vfbs); 567 kfree(vfbs);
747out_err1: 568out_err1:
748 return ret; 569 return ret;
@@ -752,14 +573,6 @@ out_err1:
752 * Dmabuf framebuffer code 573 * Dmabuf framebuffer code
753 */ 574 */
754 575
755#define vmw_framebuffer_to_vfbd(x) \
756 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
757
758struct vmw_framebuffer_dmabuf {
759 struct vmw_framebuffer base;
760 struct vmw_dma_buffer *buffer;
761};
762
763static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 576static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
764{ 577{
765 struct vmw_framebuffer_dmabuf *vfbd = 578 struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
767 580
768 drm_framebuffer_cleanup(framebuffer); 581 drm_framebuffer_cleanup(framebuffer);
769 vmw_dmabuf_unreference(&vfbd->buffer); 582 vmw_dmabuf_unreference(&vfbd->buffer);
770 ttm_base_object_unref(&vfbd->base.user_obj); 583 if (vfbd->base.user_obj)
584 ttm_base_object_unref(&vfbd->base.user_obj);
771 585
772 kfree(vfbd); 586 kfree(vfbd);
773} 587}
774 588
775static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
776 struct vmw_framebuffer *framebuffer,
777 unsigned flags, unsigned color,
778 struct drm_clip_rect *clips,
779 unsigned num_clips, int increment)
780{
781 size_t fifo_size;
782 int i;
783
784 struct {
785 uint32_t header;
786 SVGAFifoCmdUpdate body;
787 } *cmd;
788
789 fifo_size = sizeof(*cmd) * num_clips;
790 cmd = vmw_fifo_reserve(dev_priv, fifo_size);
791 if (unlikely(cmd == NULL)) {
792 DRM_ERROR("Fifo reserve failed.\n");
793 return -ENOMEM;
794 }
795
796 memset(cmd, 0, fifo_size);
797 for (i = 0; i < num_clips; i++, clips += increment) {
798 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
799 cmd[i].body.x = cpu_to_le32(clips->x1);
800 cmd[i].body.y = cpu_to_le32(clips->y1);
801 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
802 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
803 }
804
805 vmw_fifo_commit(dev_priv, fifo_size);
806 return 0;
807}
808
809static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
810 struct vmw_private *dev_priv,
811 struct vmw_framebuffer *framebuffer)
812{
813 int depth = framebuffer->base.depth;
814 size_t fifo_size;
815 int ret;
816
817 struct {
818 uint32_t header;
819 SVGAFifoCmdDefineGMRFB body;
820 } *cmd;
821
822 /* Emulate RGBA support, contrary to svga_reg.h this is not
823 * supported by hosts. This is only a problem if we are reading
824 * this value later and expecting what we uploaded back.
825 */
826 if (depth == 32)
827 depth = 24;
828
829 fifo_size = sizeof(*cmd);
830 cmd = kmalloc(fifo_size, GFP_KERNEL);
831 if (unlikely(cmd == NULL)) {
832 DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
833 return -ENOMEM;
834 }
835
836 memset(cmd, 0, fifo_size);
837 cmd->header = SVGA_CMD_DEFINE_GMRFB;
838 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
839 cmd->body.format.colorDepth = depth;
840 cmd->body.format.reserved = 0;
841 cmd->body.bytesPerLine = framebuffer->base.pitches[0];
842 cmd->body.ptr.gmrId = framebuffer->user_handle;
843 cmd->body.ptr.offset = 0;
844
845 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
846 fifo_size, 0, NULL, NULL);
847
848 kfree(cmd);
849
850 return ret;
851}
852
853static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
854 struct vmw_private *dev_priv,
855 struct vmw_framebuffer *framebuffer,
856 unsigned flags, unsigned color,
857 struct drm_clip_rect *clips,
858 unsigned num_clips, int increment,
859 struct vmw_fence_obj **out_fence)
860{
861 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
862 struct drm_clip_rect *clips_ptr;
863 int i, k, num_units, ret;
864 struct drm_crtc *crtc;
865 size_t fifo_size;
866
867 struct {
868 uint32_t header;
869 SVGAFifoCmdBlitGMRFBToScreen body;
870 } *blits;
871
872 ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
873 if (unlikely(ret != 0))
874 return ret; /* define_gmrfb prints warnings */
875
876 fifo_size = sizeof(*blits) * num_clips;
877 blits = kmalloc(fifo_size, GFP_KERNEL);
878 if (unlikely(blits == NULL)) {
879 DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
880 return -ENOMEM;
881 }
882
883 num_units = 0;
884 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
885 if (crtc->primary->fb != &framebuffer->base)
886 continue;
887 units[num_units++] = vmw_crtc_to_du(crtc);
888 }
889
890 for (k = 0; k < num_units; k++) {
891 struct vmw_display_unit *unit = units[k];
892 int hit_num = 0;
893
894 clips_ptr = clips;
895 for (i = 0; i < num_clips; i++, clips_ptr += increment) {
896 int clip_x1 = clips_ptr->x1 - unit->crtc.x;
897 int clip_y1 = clips_ptr->y1 - unit->crtc.y;
898 int clip_x2 = clips_ptr->x2 - unit->crtc.x;
899 int clip_y2 = clips_ptr->y2 - unit->crtc.y;
900 int move_x, move_y;
901
902 /* skip any crtcs that misses the clip region */
903 if (clip_x1 >= unit->crtc.mode.hdisplay ||
904 clip_y1 >= unit->crtc.mode.vdisplay ||
905 clip_x2 <= 0 || clip_y2 <= 0)
906 continue;
907
908 /* clip size to crtc size */
909 clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
910 clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
911
912 /* translate both src and dest to bring clip into screen */
913 move_x = min_t(int, clip_x1, 0);
914 move_y = min_t(int, clip_y1, 0);
915
916 /* actual translate done here */
917 blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
918 blits[hit_num].body.destScreenId = unit->unit;
919 blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
920 blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
921 blits[hit_num].body.destRect.left = clip_x1 - move_x;
922 blits[hit_num].body.destRect.top = clip_y1 - move_y;
923 blits[hit_num].body.destRect.right = clip_x2;
924 blits[hit_num].body.destRect.bottom = clip_y2;
925 hit_num++;
926 }
927
928 /* no clips hit the crtc */
929 if (hit_num == 0)
930 continue;
931
932 /* only return the last fence */
933 if (out_fence && *out_fence)
934 vmw_fence_obj_unreference(out_fence);
935
936 fifo_size = sizeof(*blits) * hit_num;
937 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
938 fifo_size, 0, NULL, out_fence);
939
940 if (unlikely(ret != 0))
941 break;
942 }
943
944 kfree(blits);
945
946 return ret;
947}
948
949static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 589static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
950 struct drm_file *file_priv, 590 struct drm_file *file_priv,
951 unsigned flags, unsigned color, 591 unsigned flags, unsigned color,
@@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
977 increment = 2; 617 increment = 2;
978 } 618 }
979 619
980 if (dev_priv->ldu_priv) { 620 switch (dev_priv->active_display_unit) {
981 ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, 621 case vmw_du_screen_target:
982 flags, color, 622 ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
983 clips, num_clips, increment); 623 clips, NULL, num_clips, increment,
984 } else { 624 true, true);
985 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, 625 break;
986 flags, color, 626 case vmw_du_screen_object:
987 clips, num_clips, increment, NULL); 627 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
628 clips, num_clips, increment,
629 true,
630 NULL);
631 break;
632 case vmw_du_legacy:
633 ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
634 clips, num_clips, increment);
635 break;
636 default:
637 ret = -EINVAL;
638 WARN_ONCE(true, "Dirty called with invalid display system.\n");
639 break;
988 } 640 }
989 641
642 vmw_fifo_flush(dev_priv, false);
990 ttm_read_unlock(&dev_priv->reservation_sem); 643 ttm_read_unlock(&dev_priv->reservation_sem);
991 644
992 drm_modeset_unlock_all(dev_priv->dev); 645 drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
1002/** 655/**
1003 * Pin the dmabuffer to the start of vram. 656 * Pin the dmabuffer to the start of vram.
1004 */ 657 */
1005static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) 658static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1006{ 659{
1007 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 660 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1008 struct vmw_framebuffer_dmabuf *vfbd = 661 struct vmw_dma_buffer *buf;
1009 vmw_framebuffer_to_vfbd(&vfb->base);
1010 int ret; 662 int ret;
1011 663
1012 /* This code should not be used with screen objects */ 664 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1013 BUG_ON(dev_priv->sou_priv); 665 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1014
1015 vmw_overlay_pause_all(dev_priv);
1016 666
1017 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); 667 if (!buf)
1018 668 return 0;
1019 vmw_overlay_resume_all(dev_priv);
1020 669
1021 WARN_ON(ret != 0); 670 switch (dev_priv->active_display_unit) {
671 case vmw_du_legacy:
672 vmw_overlay_pause_all(dev_priv);
673 ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
674 vmw_overlay_resume_all(dev_priv);
675 break;
676 case vmw_du_screen_object:
677 case vmw_du_screen_target:
678 if (vfb->dmabuf)
679 return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
680 false);
681
682 return vmw_dmabuf_pin_in_placement(dev_priv, buf,
683 &vmw_mob_placement, false);
684 default:
685 return -EINVAL;
686 }
1022 687
1023 return 0; 688 return ret;
1024} 689}
1025 690
1026static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) 691static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1027{ 692{
1028 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 693 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1029 struct vmw_framebuffer_dmabuf *vfbd = 694 struct vmw_dma_buffer *buf;
1030 vmw_framebuffer_to_vfbd(&vfb->base);
1031 695
1032 if (!vfbd->buffer) { 696 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1033 WARN_ON(!vfbd->buffer); 697 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
698
699 if (WARN_ON(!buf))
1034 return 0; 700 return 0;
701
702 return vmw_dmabuf_unpin(dev_priv, buf, false);
703}
704
705/**
706 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
707 *
708 * @dev: DRM device
709 * @mode_cmd: parameters for the new surface
710 * @dmabuf_mob: MOB backing the DMA buf
711 * @srf_out: newly created surface
712 *
713 * When the content FB is a DMA buf, we create a surface as a proxy to the
714 * same buffer. This way we can do a surface copy rather than a surface DMA.
715 * This is a more efficient approach.
716 *
717 * RETURNS:
718 * 0 on success, error code otherwise
719 */
720static int vmw_create_dmabuf_proxy(struct drm_device *dev,
721 const struct drm_mode_fb_cmd *mode_cmd,
722 struct vmw_dma_buffer *dmabuf_mob,
723 struct vmw_surface **srf_out)
724{
725 uint32_t format;
726 struct drm_vmw_size content_base_size;
727 struct vmw_resource *res;
728 int ret;
729
730 switch (mode_cmd->depth) {
731 case 32:
732 case 24:
733 format = SVGA3D_X8R8G8B8;
734 break;
735
736 case 16:
737 case 15:
738 format = SVGA3D_R5G6B5;
739 break;
740
741 case 8:
742 format = SVGA3D_P8;
743 break;
744
745 default:
746 DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
747 return -EINVAL;
1035 } 748 }
1036 749
1037 return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); 750 content_base_size.width = mode_cmd->width;
751 content_base_size.height = mode_cmd->height;
752 content_base_size.depth = 1;
753
754 ret = vmw_surface_gb_priv_define(dev,
755 0, /* kernel visible only */
756 0, /* flags */
757 format,
758 true, /* can be a scanout buffer */
759 1, /* num of mip levels */
760 0,
761 0,
762 content_base_size,
763 srf_out);
764 if (ret) {
765 DRM_ERROR("Failed to allocate proxy content buffer\n");
766 return ret;
767 }
768
769 res = &(*srf_out)->res;
770
771 /* Reserve and switch the backing mob. */
772 mutex_lock(&res->dev_priv->cmdbuf_mutex);
773 (void) vmw_resource_reserve(res, false, true);
774 vmw_dmabuf_unreference(&res->backup);
775 res->backup = vmw_dmabuf_reference(dmabuf_mob);
776 res->backup_offset = 0;
777 vmw_resource_unreserve(res, false, NULL, 0);
778 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
779
780 return 0;
1038} 781}
1039 782
783
784
1040static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, 785static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1041 struct vmw_dma_buffer *dmabuf, 786 struct vmw_dma_buffer *dmabuf,
1042 struct vmw_framebuffer **out, 787 struct vmw_framebuffer **out,
@@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1057 } 802 }
1058 803
1059 /* Limited framebuffer color depth support for screen objects */ 804 /* Limited framebuffer color depth support for screen objects */
1060 if (dev_priv->sou_priv) { 805 if (dev_priv->active_display_unit == vmw_du_screen_object) {
1061 switch (mode_cmd->depth) { 806 switch (mode_cmd->depth) {
1062 case 32: 807 case 32:
1063 case 24: 808 case 24:
@@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1089 goto out_err1; 834 goto out_err1;
1090 } 835 }
1091 836
1092 if (!vmw_dmabuf_reference(dmabuf)) {
1093 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
1094 ret = -EINVAL;
1095 goto out_err2;
1096 }
1097
1098 vfbd->base.base.bits_per_pixel = mode_cmd->bpp; 837 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
1099 vfbd->base.base.pitches[0] = mode_cmd->pitch; 838 vfbd->base.base.pitches[0] = mode_cmd->pitch;
1100 vfbd->base.base.depth = mode_cmd->depth; 839 vfbd->base.base.depth = mode_cmd->depth;
1101 vfbd->base.base.width = mode_cmd->width; 840 vfbd->base.base.width = mode_cmd->width;
1102 vfbd->base.base.height = mode_cmd->height; 841 vfbd->base.base.height = mode_cmd->height;
1103 if (!dev_priv->sou_priv) {
1104 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
1105 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
1106 }
1107 vfbd->base.dmabuf = true; 842 vfbd->base.dmabuf = true;
1108 vfbd->buffer = dmabuf; 843 vfbd->buffer = vmw_dmabuf_reference(dmabuf);
1109 vfbd->base.user_handle = mode_cmd->handle; 844 vfbd->base.user_handle = mode_cmd->handle;
1110 *out = &vfbd->base; 845 *out = &vfbd->base;
1111 846
1112 ret = drm_framebuffer_init(dev, &vfbd->base.base, 847 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1113 &vmw_framebuffer_dmabuf_funcs); 848 &vmw_framebuffer_dmabuf_funcs);
1114 if (ret) 849 if (ret)
1115 goto out_err3; 850 goto out_err2;
1116 851
1117 return 0; 852 return 0;
1118 853
1119out_err3:
1120 vmw_dmabuf_unreference(&dmabuf);
1121out_err2: 854out_err2:
855 vmw_dmabuf_unreference(&dmabuf);
1122 kfree(vfbd); 856 kfree(vfbd);
1123out_err1: 857out_err1:
1124 return ret; 858 return ret;
1125} 859}
1126 860
861/**
862 * vmw_kms_new_framebuffer - Create a new framebuffer.
863 *
864 * @dev_priv: Pointer to device private struct.
865 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
866 * Either @dmabuf or @surface must be NULL.
867 * @surface: Pointer to a surface to wrap the kms framebuffer around.
868 * Either @dmabuf or @surface must be NULL.
869 * @only_2d: No presents will occur to this dma buffer based framebuffer.
870 * This helps the code to do some important optimizations.
871 * @mode_cmd: Frame-buffer metadata.
872 */
873struct vmw_framebuffer *
874vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
875 struct vmw_dma_buffer *dmabuf,
876 struct vmw_surface *surface,
877 bool only_2d,
878 const struct drm_mode_fb_cmd *mode_cmd)
879{
880 struct vmw_framebuffer *vfb = NULL;
881 bool is_dmabuf_proxy = false;
882 int ret;
883
884 /*
885 * We cannot use the SurfaceDMA command in a non-accelerated VM;
886 * therefore, wrap the DMA buf in a surface so we can use the
887 * SurfaceCopy command.
888 */
889 if (dmabuf && only_2d &&
890 dev_priv->active_display_unit == vmw_du_screen_target) {
891 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
892 dmabuf, &surface);
893 if (ret)
894 return ERR_PTR(ret);
895
896 is_dmabuf_proxy = true;
897 }
898
899 /* Create the new framebuffer depending on what we have */
900 if (surface) {
901 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
902 mode_cmd,
903 is_dmabuf_proxy);
904
905 /*
906 * vmw_create_dmabuf_proxy() adds a reference that is no longer
907 * needed
908 */
909 if (is_dmabuf_proxy)
910 vmw_surface_unreference(&surface);
911 } else if (dmabuf) {
912 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
913 mode_cmd);
914 } else {
915 BUG();
916 }
917
918 if (ret)
919 return ERR_PTR(ret);
920
921 vfb->pin = vmw_framebuffer_pin;
922 vfb->unpin = vmw_framebuffer_unpin;
923
924 return vfb;
925}
926
1127/* 927/*
1128 * Generic Kernel modesetting functions 928 * Generic Kernel modesetting functions
1129 */ 929 */
@@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1157 if (!vmw_kms_validate_mode_vram(dev_priv, 957 if (!vmw_kms_validate_mode_vram(dev_priv,
1158 mode_cmd.pitch, 958 mode_cmd.pitch,
1159 mode_cmd.height)) { 959 mode_cmd.height)) {
1160 DRM_ERROR("VRAM size is too small for requested mode.\n"); 960 DRM_ERROR("Requested mode exceed bounding box limit.\n");
1161 return ERR_PTR(-ENOMEM); 961 return ERR_PTR(-ENOMEM);
1162 } 962 }
1163 963
@@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1187 if (ret) 987 if (ret)
1188 goto err_out; 988 goto err_out;
1189 989
1190 /* Create the new framebuffer depending one what we got back */ 990 vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1191 if (bo) 991 !(dev_priv->capabilities & SVGA_CAP_3D),
1192 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, 992 &mode_cmd);
1193 &mode_cmd); 993 if (IS_ERR(vfb)) {
1194 else if (surface) 994 ret = PTR_ERR(vfb);
1195 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, 995 goto err_out;
1196 surface, &vfb, &mode_cmd); 996 }
1197 else
1198 BUG();
1199 997
1200err_out: 998err_out:
1201 /* vmw_user_lookup_handle takes one ref so does new_fb */ 999 /* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
1218 .fb_create = vmw_kms_fb_create, 1016 .fb_create = vmw_kms_fb_create,
1219}; 1017};
1220 1018
1019static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1020 struct drm_file *file_priv,
1021 struct vmw_framebuffer *vfb,
1022 struct vmw_surface *surface,
1023 uint32_t sid,
1024 int32_t destX, int32_t destY,
1025 struct drm_vmw_rect *clips,
1026 uint32_t num_clips)
1027{
1028 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1029 &surface->res, destX, destY,
1030 num_clips, 1, NULL);
1031}
1032
1033
1221int vmw_kms_present(struct vmw_private *dev_priv, 1034int vmw_kms_present(struct vmw_private *dev_priv,
1222 struct drm_file *file_priv, 1035 struct drm_file *file_priv,
1223 struct vmw_framebuffer *vfb, 1036 struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv,
1227 struct drm_vmw_rect *clips, 1040 struct drm_vmw_rect *clips,
1228 uint32_t num_clips) 1041 uint32_t num_clips)
1229{ 1042{
1230 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 1043 int ret;
1231 struct drm_clip_rect *tmp;
1232 struct drm_crtc *crtc;
1233 size_t fifo_size;
1234 int i, k, num_units;
1235 int ret = 0; /* silence warning */
1236 int left, right, top, bottom;
1237
1238 struct {
1239 SVGA3dCmdHeader header;
1240 SVGA3dCmdBlitSurfaceToScreen body;
1241 } *cmd;
1242 SVGASignedRect *blits;
1243
1244 num_units = 0;
1245 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1246 if (crtc->primary->fb != &vfb->base)
1247 continue;
1248 units[num_units++] = vmw_crtc_to_du(crtc);
1249 }
1250
1251 BUG_ON(surface == NULL);
1252 BUG_ON(!clips || !num_clips);
1253
1254 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
1255 if (unlikely(tmp == NULL)) {
1256 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
1257 return -ENOMEM;
1258 }
1259
1260 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
1261 cmd = kmalloc(fifo_size, GFP_KERNEL);
1262 if (unlikely(cmd == NULL)) {
1263 DRM_ERROR("Failed to allocate temporary fifo memory.\n");
1264 ret = -ENOMEM;
1265 goto out_free_tmp;
1266 }
1267
1268 left = clips->x;
1269 right = clips->x + clips->w;
1270 top = clips->y;
1271 bottom = clips->y + clips->h;
1272
1273 for (i = 1; i < num_clips; i++) {
1274 left = min_t(int, left, (int)clips[i].x);
1275 right = max_t(int, right, (int)clips[i].x + clips[i].w);
1276 top = min_t(int, top, (int)clips[i].y);
1277 bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
1278 }
1279
1280 /* only need to do this once */
1281 memset(cmd, 0, fifo_size);
1282 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
1283
1284 blits = (SVGASignedRect *)&cmd[1];
1285
1286 cmd->body.srcRect.left = left;
1287 cmd->body.srcRect.right = right;
1288 cmd->body.srcRect.top = top;
1289 cmd->body.srcRect.bottom = bottom;
1290
1291 for (i = 0; i < num_clips; i++) {
1292 tmp[i].x1 = clips[i].x - left;
1293 tmp[i].x2 = clips[i].x + clips[i].w - left;
1294 tmp[i].y1 = clips[i].y - top;
1295 tmp[i].y2 = clips[i].y + clips[i].h - top;
1296 }
1297
1298 for (k = 0; k < num_units; k++) {
1299 struct vmw_display_unit *unit = units[k];
1300 struct vmw_clip_rect clip;
1301 int num;
1302
1303 clip.x1 = left + destX - unit->crtc.x;
1304 clip.y1 = top + destY - unit->crtc.y;
1305 clip.x2 = right + destX - unit->crtc.x;
1306 clip.y2 = bottom + destY - unit->crtc.y;
1307
1308 /* skip any crtcs that misses the clip region */
1309 if (clip.x1 >= unit->crtc.mode.hdisplay ||
1310 clip.y1 >= unit->crtc.mode.vdisplay ||
1311 clip.x2 <= 0 || clip.y2 <= 0)
1312 continue;
1313
1314 /*
1315 * In order for the clip rects to be correctly scaled
1316 * the src and dest rects needs to be the same size.
1317 */
1318 cmd->body.destRect.left = clip.x1;
1319 cmd->body.destRect.right = clip.x2;
1320 cmd->body.destRect.top = clip.y1;
1321 cmd->body.destRect.bottom = clip.y2;
1322
1323 /* create a clip rect of the crtc in dest coords */
1324 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
1325 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
1326 clip.x1 = 0 - clip.x1;
1327 clip.y1 = 0 - clip.y1;
1328
1329 /* need to reset sid as it is changed by execbuf */
1330 cmd->body.srcImage.sid = sid;
1331 cmd->body.destScreenId = unit->unit;
1332
1333 /* clip and write blits to cmd stream */
1334 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
1335
1336 /* if no cliprects hit skip this */
1337 if (num == 0)
1338 continue;
1339
1340 /* recalculate package length */
1341 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
1342 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
1343 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
1344 fifo_size, 0, NULL, NULL);
1345
1346 if (unlikely(ret != 0))
1347 break;
1348 }
1349
1350 kfree(cmd);
1351out_free_tmp:
1352 kfree(tmp);
1353
1354 return ret;
1355}
1356
1357int vmw_kms_readback(struct vmw_private *dev_priv,
1358 struct drm_file *file_priv,
1359 struct vmw_framebuffer *vfb,
1360 struct drm_vmw_fence_rep __user *user_fence_rep,
1361 struct drm_vmw_rect *clips,
1362 uint32_t num_clips)
1363{
1364 struct vmw_framebuffer_dmabuf *vfbd =
1365 vmw_framebuffer_to_vfbd(&vfb->base);
1366 struct vmw_dma_buffer *dmabuf = vfbd->buffer;
1367 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1368 struct drm_crtc *crtc;
1369 size_t fifo_size;
1370 int i, k, ret, num_units, blits_pos;
1371
1372 struct {
1373 uint32_t header;
1374 SVGAFifoCmdDefineGMRFB body;
1375 } *cmd;
1376 struct {
1377 uint32_t header;
1378 SVGAFifoCmdBlitScreenToGMRFB body;
1379 } *blits;
1380
1381 num_units = 0;
1382 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1383 if (crtc->primary->fb != &vfb->base)
1384 continue;
1385 units[num_units++] = vmw_crtc_to_du(crtc);
1386 }
1387
1388 BUG_ON(dmabuf == NULL);
1389 BUG_ON(!clips || !num_clips);
1390
1391 /* take a safe guess at fifo size */
1392 fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
1393 cmd = kmalloc(fifo_size, GFP_KERNEL);
1394 if (unlikely(cmd == NULL)) {
1395 DRM_ERROR("Failed to allocate temporary fifo memory.\n");
1396 return -ENOMEM;
1397 }
1398
1399 memset(cmd, 0, fifo_size);
1400 cmd->header = SVGA_CMD_DEFINE_GMRFB;
1401 cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
1402 cmd->body.format.colorDepth = vfb->base.depth;
1403 cmd->body.format.reserved = 0;
1404 cmd->body.bytesPerLine = vfb->base.pitches[0];
1405 cmd->body.ptr.gmrId = vfb->user_handle;
1406 cmd->body.ptr.offset = 0;
1407
1408 blits = (void *)&cmd[1];
1409 blits_pos = 0;
1410 for (i = 0; i < num_units; i++) {
1411 struct drm_vmw_rect *c = clips;
1412 for (k = 0; k < num_clips; k++, c++) {
1413 /* transform clip coords to crtc origin based coords */
1414 int clip_x1 = c->x - units[i]->crtc.x;
1415 int clip_x2 = c->x - units[i]->crtc.x + c->w;
1416 int clip_y1 = c->y - units[i]->crtc.y;
1417 int clip_y2 = c->y - units[i]->crtc.y + c->h;
1418 int dest_x = c->x;
1419 int dest_y = c->y;
1420
1421 /* compensate for clipping, we negate
1422 * a negative number and add that.
1423 */
1424 if (clip_x1 < 0)
1425 dest_x += -clip_x1;
1426 if (clip_y1 < 0)
1427 dest_y += -clip_y1;
1428
1429 /* clip */
1430 clip_x1 = max(clip_x1, 0);
1431 clip_y1 = max(clip_y1, 0);
1432 clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
1433 clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
1434
1435 /* and cull any rects that misses the crtc */
1436 if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
1437 clip_y1 >= units[i]->crtc.mode.vdisplay ||
1438 clip_x2 <= 0 || clip_y2 <= 0)
1439 continue;
1440
1441 blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
1442 blits[blits_pos].body.srcScreenId = units[i]->unit;
1443 blits[blits_pos].body.destOrigin.x = dest_x;
1444 blits[blits_pos].body.destOrigin.y = dest_y;
1445 1044
1446 blits[blits_pos].body.srcRect.left = clip_x1; 1045 switch (dev_priv->active_display_unit) {
1447 blits[blits_pos].body.srcRect.top = clip_y1; 1046 case vmw_du_screen_target:
1448 blits[blits_pos].body.srcRect.right = clip_x2; 1047 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1449 blits[blits_pos].body.srcRect.bottom = clip_y2; 1048 &surface->res, destX, destY,
1450 blits_pos++; 1049 num_clips, 1, NULL);
1451 } 1050 break;
1051 case vmw_du_screen_object:
1052 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1053 sid, destX, destY, clips,
1054 num_clips);
1055 break;
1056 default:
1057 WARN_ONCE(true,
1058 "Present called with invalid display system.\n");
1059 ret = -ENOSYS;
1060 break;
1452 } 1061 }
1453 /* reset size here and use calculated exact size from loops */ 1062 if (ret)
1454 fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; 1063 return ret;
1455
1456 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
1457 0, user_fence_rep, NULL);
1458 1064
1459 kfree(cmd); 1065 vmw_fifo_flush(dev_priv, false);
1460 1066
1461 return ret; 1067 return 0;
1462} 1068}
1463 1069
1464int vmw_kms_init(struct vmw_private *dev_priv) 1070int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv)
1470 dev->mode_config.funcs = &vmw_kms_funcs; 1076 dev->mode_config.funcs = &vmw_kms_funcs;
1471 dev->mode_config.min_width = 1; 1077 dev->mode_config.min_width = 1;
1472 dev->mode_config.min_height = 1; 1078 dev->mode_config.min_height = 1;
1473 /* assumed largest fb size */ 1079 dev->mode_config.max_width = dev_priv->texture_max_width;
1474 dev->mode_config.max_width = 8192; 1080 dev->mode_config.max_height = dev_priv->texture_max_height;
1475 dev->mode_config.max_height = 8192;
1476 1081
1477 ret = vmw_kms_init_screen_object_display(dev_priv); 1082 ret = vmw_kms_stdu_init_display(dev_priv);
1478 if (ret) /* Fallback */ 1083 if (ret) {
1479 (void)vmw_kms_init_legacy_display_system(dev_priv); 1084 ret = vmw_kms_sou_init_display(dev_priv);
1085 if (ret) /* Fallback */
1086 ret = vmw_kms_ldu_init_display(dev_priv);
1087 }
1480 1088
1481 return 0; 1089 return ret;
1482} 1090}
1483 1091
1484int vmw_kms_close(struct vmw_private *dev_priv) 1092int vmw_kms_close(struct vmw_private *dev_priv)
1485{ 1093{
1094 int ret;
1095
1486 /* 1096 /*
1487 * Docs say we should take the lock before calling this function 1097 * Docs say we should take the lock before calling this function
1488 * but since it destroys encoders and our destructor calls 1098 * but since it destroys encoders and our destructor calls
1489 * drm_encoder_cleanup which takes the lock we deadlock. 1099 * drm_encoder_cleanup which takes the lock we deadlock.
1490 */ 1100 */
1491 drm_mode_config_cleanup(dev_priv->dev); 1101 drm_mode_config_cleanup(dev_priv->dev);
1492 if (dev_priv->sou_priv) 1102 if (dev_priv->active_display_unit == vmw_du_screen_object)
1493 vmw_kms_close_screen_object_display(dev_priv); 1103 ret = vmw_kms_sou_close_display(dev_priv);
1104 else if (dev_priv->active_display_unit == vmw_du_screen_target)
1105 ret = vmw_kms_stdu_close_display(dev_priv);
1494 else 1106 else
1495 vmw_kms_close_legacy_display_system(dev_priv); 1107 ret = vmw_kms_ldu_close_display(dev_priv);
1496 return 0; 1108
1109 return ret;
1497} 1110}
1498 1111
1499int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 1112int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1569 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); 1182 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1570 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1183 else if (vmw_fifo_have_pitchlock(vmw_priv))
1571 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + 1184 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
1572 SVGA_FIFO_PITCHLOCK); 1185 SVGA_FIFO_PITCHLOCK);
1573 1186
1574 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1187 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1575 return 0; 1188 return 0;
@@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1641 uint32_t pitch, 1254 uint32_t pitch,
1642 uint32_t height) 1255 uint32_t height)
1643{ 1256{
1644 return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; 1257 return ((u64) pitch * (u64) height) < (u64)
1258 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
1259 dev_priv->prim_bb_mem : dev_priv->vram_size);
1645} 1260}
1646 1261
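As a quick sanity check of the bound above: a 1920x1080 mode at 32 bpp has a pitch of 1920 * 4 = 7680 bytes, so pitch * height = 7680 * 1080 = 8294400 bytes, roughly 7.9 MiB, which must now fit in prim_bb_mem for screen targets and in vram_size for the other display units.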
1647 1262
@@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1715 return 0; 1330 return 0;
1716} 1331}
1717 1332
1718int vmw_du_page_flip(struct drm_crtc *crtc,
1719 struct drm_framebuffer *fb,
1720 struct drm_pending_vblank_event *event,
1721 uint32_t page_flip_flags)
1722{
1723 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1724 struct drm_framebuffer *old_fb = crtc->primary->fb;
1725 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
1726 struct drm_file *file_priv ;
1727 struct vmw_fence_obj *fence = NULL;
1728 struct drm_clip_rect clips;
1729 int ret;
1730
1731 if (event == NULL)
1732 return -EINVAL;
1733
1734 /* require ScreenObject support for page flipping */
1735 if (!dev_priv->sou_priv)
1736 return -ENOSYS;
1737
1738 file_priv = event->base.file_priv;
1739 if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
1740 return -EINVAL;
1741
1742 crtc->primary->fb = fb;
1743
1744 /* do a full screen dirty update */
1745 clips.x1 = clips.y1 = 0;
1746 clips.x2 = fb->width;
1747 clips.y2 = fb->height;
1748
1749 if (vfb->dmabuf)
1750 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
1751 0, 0, &clips, 1, 1, &fence);
1752 else
1753 ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
1754 0, 0, &clips, 1, 1, &fence);
1755
1756
1757 if (ret != 0)
1758 goto out_no_fence;
1759 if (!fence) {
1760 ret = -EINVAL;
1761 goto out_no_fence;
1762 }
1763
1764 ret = vmw_event_fence_action_queue(file_priv, fence,
1765 &event->base,
1766 &event->event.tv_sec,
1767 &event->event.tv_usec,
1768 true);
1769
1770 /*
1771 * No need to hold on to this now. The only cleanup
1772 * we need to do if we fail is unref the fence.
1773 */
1774 vmw_fence_obj_unreference(&fence);
1775
1776 if (vmw_crtc_to_du(crtc)->is_implicit)
1777 vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
1778
1779 return ret;
1780
1781out_no_fence:
1782 crtc->primary->fb = old_fb;
1783 return ret;
1784}
1785
1786
1787void vmw_du_crtc_save(struct drm_crtc *crtc) 1333void vmw_du_crtc_save(struct drm_crtc *crtc)
1788{ 1334{
1789} 1335}
@@ -1920,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
1920 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay 1466 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
1921 * members filled in. 1467 * members filled in.
1922 */ 1468 */
1923static void vmw_guess_mode_timing(struct drm_display_mode *mode) 1469void vmw_guess_mode_timing(struct drm_display_mode *mode)
1924{ 1470{
1925 mode->hsync_start = mode->hdisplay + 50; 1471 mode->hsync_start = mode->hdisplay + 50;
1926 mode->hsync_end = mode->hsync_start + 50; 1472 mode->hsync_end = mode->hsync_start + 50;
@@ -1955,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1955 * If using screen objects, then assume 32-bpp because that's what the 1501 * If using screen objects, then assume 32-bpp because that's what the
1956 * SVGA device is assuming 1502 * SVGA device is assuming
1957 */ 1503 */
1958 if (dev_priv->sou_priv) 1504 if (dev_priv->active_display_unit == vmw_du_screen_object)
1959 assumed_bpp = 4; 1505 assumed_bpp = 4;
1960 1506
1507 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1508 max_width = min(max_width, dev_priv->stdu_max_width);
1509 max_height = min(max_height, dev_priv->stdu_max_height);
1510 }
1511
1961 /* Add preferred mode */ 1512 /* Add preferred mode */
1962 { 1513 mode = drm_mode_duplicate(dev, &prefmode);
1963 mode = drm_mode_duplicate(dev, &prefmode); 1514 if (!mode)
1964 if (!mode) 1515 return 0;
1965 return 0; 1516 mode->hdisplay = du->pref_width;
1966 mode->hdisplay = du->pref_width; 1517 mode->vdisplay = du->pref_height;
1967 mode->vdisplay = du->pref_height; 1518 vmw_guess_mode_timing(mode);
1968 vmw_guess_mode_timing(mode);
1969
1970 if (vmw_kms_validate_mode_vram(dev_priv,
1971 mode->hdisplay * assumed_bpp,
1972 mode->vdisplay)) {
1973 drm_mode_probed_add(connector, mode);
1974 } else {
1975 drm_mode_destroy(dev, mode);
1976 mode = NULL;
1977 }
1978 1519
1979 if (du->pref_mode) { 1520 if (vmw_kms_validate_mode_vram(dev_priv,
1980 list_del_init(&du->pref_mode->head); 1521 mode->hdisplay * assumed_bpp,
1981 drm_mode_destroy(dev, du->pref_mode); 1522 mode->vdisplay)) {
1982 } 1523 drm_mode_probed_add(connector, mode);
1524 } else {
1525 drm_mode_destroy(dev, mode);
1526 mode = NULL;
1527 }
1983 1528
1984 /* mode might be null here, this is intended */ 1529 if (du->pref_mode) {
1985 du->pref_mode = mode; 1530 list_del_init(&du->pref_mode->head);
1531 drm_mode_destroy(dev, du->pref_mode);
1986 } 1532 }
1987 1533
1534 /* mode might be null here, this is intended */
1535 du->pref_mode = mode;
1536
1988 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { 1537 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
1989 bmode = &vmw_kms_connector_builtin[i]; 1538 bmode = &vmw_kms_connector_builtin[i];
1990 if (bmode->hdisplay > max_width || 1539 if (bmode->hdisplay > max_width ||
@@ -2004,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2004 drm_mode_probed_add(connector, mode); 1553 drm_mode_probed_add(connector, mode);
2005 } 1554 }
2006 1555
2007 /* Move the prefered mode first, help apps pick the right mode. */
2008 if (du->pref_mode)
2009 list_move(&du->pref_mode->head, &connector->probed_modes);
2010
2011 drm_mode_connector_list_update(connector, true); 1556 drm_mode_connector_list_update(connector, true);
1557 /* Move the preferred mode first, help apps pick the right mode. */
1558 drm_mode_sort(&connector->modes);
2012 1559
2013 return 1; 1560 return 1;
2014} 1561}
@@ -2032,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2032 unsigned rects_size; 1579 unsigned rects_size;
2033 int ret; 1580 int ret;
2034 int i; 1581 int i;
1582 u64 total_pixels = 0;
2035 struct drm_mode_config *mode_config = &dev->mode_config; 1583 struct drm_mode_config *mode_config = &dev->mode_config;
1584 struct drm_vmw_rect bounding_box = {0};
2036 1585
2037 if (!arg->num_outputs) { 1586 if (!arg->num_outputs) {
2038 struct drm_vmw_rect def_rect = {0, 0, 800, 600}; 1587 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2063,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2063 ret = -EINVAL; 1612 ret = -EINVAL;
2064 goto out_free; 1613 goto out_free;
2065 } 1614 }
1615
1616 /*
1617 * bounding_box.w and bounding_box.h are used as
1618 * lower-right coordinates
1619 */
1620 if (rects[i].x + rects[i].w > bounding_box.w)
1621 bounding_box.w = rects[i].x + rects[i].w;
1622
1623 if (rects[i].y + rects[i].h > bounding_box.h)
1624 bounding_box.h = rects[i].y + rects[i].h;
1625
1626 total_pixels += (u64) rects[i].w * (u64) rects[i].h;
1627 }
1628
1629 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1630 /*
1631 * For Screen Targets, the limits for a topology are:
1632 * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
1633 * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
1634 */
1635 u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
1636 u64 pixel_mem = total_pixels * 4;
1637
1638 if (bb_mem > dev_priv->prim_bb_mem) {
1639 DRM_ERROR("Topology is beyond supported limits.\n");
1640 ret = -EINVAL;
1641 goto out_free;
1642 }
1643
1644 if (pixel_mem > dev_priv->prim_bb_mem) {
1645 DRM_ERROR("Combined output size too large\n");
1646 ret = -EINVAL;
1647 goto out_free;
1648 }
2066 } 1649 }
2067 1650
2068 vmw_du_update_layout(dev_priv, arg->num_outputs, rects); 1651 vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2071,3 +1654,419 @@ out_free:
2071 kfree(rects); 1654 kfree(rects);
2072 return ret; 1655 return ret;
2073} 1656}
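
A worked example for the two screen-target checks above, assuming 32 bpp as the code does: two 1920x1080 outputs side by side give a 3840x1080 bounding box, so bb_mem = 3840 * 1080 * 4 = 16588800 bytes (about 15.8 MiB), and pixel_mem = 2 * 1920 * 1080 * 4 is the same 16588800 bytes. The two limits diverge for offset or L-shaped layouts, where the bounding box covers more area than the outputs themselves, which is why both are checked.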
1657
1658/**
1659 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
1660 * on a set of cliprects and a set of display units.
1661 *
1662 * @dev_priv: Pointer to a device private structure.
1663 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
1664 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
1665 * Cliprects are given in framebuffer coordinates.
1666 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
1667 * be NULL. Cliprects are given in source coordinates.
1668 * @dest_x: X coordinate offset for the crtc / destination clip rects.
1669 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
1670 * @num_clips: Number of cliprects in the @clips or @vclips array.
1671 * @increment: Integer with which to increment the clip counter when looping.
1672 * Used to skip a predetermined number of clip rects.
1673 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
1674 */
1675int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1676 struct vmw_framebuffer *framebuffer,
1677 const struct drm_clip_rect *clips,
1678 const struct drm_vmw_rect *vclips,
1679 s32 dest_x, s32 dest_y,
1680 int num_clips,
1681 int increment,
1682 struct vmw_kms_dirty *dirty)
1683{
1684 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1685 struct drm_crtc *crtc;
1686 u32 num_units = 0;
1687 u32 i, k;
1689
1690 dirty->dev_priv = dev_priv;
1691
1692 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1693 if (crtc->primary->fb != &framebuffer->base)
1694 continue;
1695 units[num_units++] = vmw_crtc_to_du(crtc);
1696 }
1697
1698 for (k = 0; k < num_units; k++) {
1699 struct vmw_display_unit *unit = units[k];
1700 s32 crtc_x = unit->crtc.x;
1701 s32 crtc_y = unit->crtc.y;
1702 s32 crtc_width = unit->crtc.mode.hdisplay;
1703 s32 crtc_height = unit->crtc.mode.vdisplay;
1704 const struct drm_clip_rect *clips_ptr = clips;
1705 const struct drm_vmw_rect *vclips_ptr = vclips;
1706
1707 dirty->unit = unit;
1708 if (dirty->fifo_reserve_size > 0) {
1709 dirty->cmd = vmw_fifo_reserve(dev_priv,
1710 dirty->fifo_reserve_size);
1711 if (!dirty->cmd) {
1712 DRM_ERROR("Couldn't reserve fifo space "
1713 "for dirty blits.\n");
1714 return -ENOMEM;
1715 }
1716 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1717 }
1718 dirty->num_hits = 0;
1719 for (i = 0; i < num_clips; i++, clips_ptr += increment,
1720 vclips_ptr += increment) {
1721 s32 clip_left;
1722 s32 clip_top;
1723
1724 /*
1725 * Select clip array type. Note that integer type
1726 * in @clips is unsigned short, whereas in @vclips
1727 * it's 32-bit.
1728 */
1729 if (clips) {
1730 dirty->fb_x = (s32) clips_ptr->x1;
1731 dirty->fb_y = (s32) clips_ptr->y1;
1732 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
1733 crtc_x;
1734 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
1735 crtc_y;
1736 } else {
1737 dirty->fb_x = vclips_ptr->x;
1738 dirty->fb_y = vclips_ptr->y;
1739 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
1740 dest_x - crtc_x;
1741 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
1742 dest_y - crtc_y;
1743 }
1744
1745 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
1746 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
1747
1748 /* Skip this clip if it's outside the crtc region */
1749 if (dirty->unit_x1 >= crtc_width ||
1750 dirty->unit_y1 >= crtc_height ||
1751 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
1752 continue;
1753
1754 /* Clip right and bottom to crtc limits */
1755 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
1756 crtc_width);
1757 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
1758 crtc_height);
1759
1760 /* Clip left and top to crtc limits */
1761 clip_left = min_t(s32, dirty->unit_x1, 0);
1762 clip_top = min_t(s32, dirty->unit_y1, 0);
1763 dirty->unit_x1 -= clip_left;
1764 dirty->unit_y1 -= clip_top;
1765 dirty->fb_x -= clip_left;
1766 dirty->fb_y -= clip_top;
1767
1768 dirty->clip(dirty);
1769 }
1770
1771 dirty->fifo_commit(dirty);
1772 }
1773
1774 return 0;
1775}
1776
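To make the closure protocol concrete, here is a minimal sketch of a vmw_kms_helper_dirty() user, modeled on the removed LDU update path earlier in this diff. All vmw_example_* names are hypothetical; the real callback pairs live in the screen-object and screen-target display unit files.

struct vmw_example_blit {
	uint32_t header;
	SVGAFifoCmdUpdate body;
};

static void vmw_example_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_example_blit *blit =
		(struct vmw_example_blit *) dirty->cmd + dirty->num_hits;

	/* The helper has already translated and clipped the rect. */
	blit->header = SVGA_CMD_UPDATE;
	blit->body.x = dirty->unit_x1;
	blit->body.y = dirty->unit_y1;
	blit->body.width = dirty->unit_x2 - dirty->unit_x1;
	blit->body.height = dirty->unit_y2 - dirty->unit_y1;
	dirty->num_hits++;
}

static void vmw_example_fifo_commit(struct vmw_kms_dirty *dirty)
{
	/* Commit only the space used by clip rects that hit this unit. */
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_example_blit) * dirty->num_hits);
}

static int vmw_example_do_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				const struct drm_clip_rect *clips,
				int num_clips, int increment)
{
	struct vmw_kms_dirty dirty;

	memset(&dirty, 0, sizeof(dirty));
	dirty.clip = vmw_example_clip;
	dirty.fifo_commit = vmw_example_fifo_commit;
	dirty.fifo_reserve_size = sizeof(struct vmw_example_blit) * num_clips;

	return vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				    0, 0, num_clips, increment, &dirty);
}
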
1777/**
1778 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
1779 * command submission.
1780 *
1781 * @dev_priv: Pointer to a device private structure.
1782 * @buf: The buffer object
1783 * @interruptible: Whether to perform waits as interruptible.
1784 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
1785 * the buffer will be validated as a GMR. Already pinned buffers will not be
1786 * validated.
1787 *
1788 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
1789 * interrupted by a signal.
1790 */
1791int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
1792 struct vmw_dma_buffer *buf,
1793 bool interruptible,
1794 bool validate_as_mob)
1795{
1796 struct ttm_buffer_object *bo = &buf->base;
1797 int ret;
1798
1799 ttm_bo_reserve(bo, false, false, interruptible, NULL);
1800 ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
1801 validate_as_mob);
1802 if (ret)
1803 ttm_bo_unreserve(bo);
1804
1805 return ret;
1806}
1807
1808/**
1809 * vmw_kms_helper_buffer_revert - Undo the actions of
1810 * vmw_kms_helper_buffer_prepare.
1811 *
1812 * @buf: Pointer to the buffer object.
1813 *
1814 * Helper to be used if an error forces the caller to undo the actions of
1815 * vmw_kms_helper_buffer_prepare.
1816 */
1817void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
1818{
1819 if (buf)
1820 ttm_bo_unreserve(&buf->base);
1821}
1822
1823/**
1824 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
1825 * kms command submission.
1826 *
1827 * @dev_priv: Pointer to a device private structure.
1828 * @file_priv: Pointer to a struct drm_file representing the caller's
1829 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
1830 * if non-NULL, @user_fence_rep must be non-NULL.
1831 * @buf: The buffer object.
1832 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1833 * ref-counted fence pointer is returned here.
1834 * @user_fence_rep: Optional pointer to a user-space provided struct
1835 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
1836 * function copies fence data to user-space in a fail-safe manner.
1837 */
1838void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
1839 struct drm_file *file_priv,
1840 struct vmw_dma_buffer *buf,
1841 struct vmw_fence_obj **out_fence,
1842 struct drm_vmw_fence_rep __user *
1843 user_fence_rep)
1844{
1845 struct vmw_fence_obj *fence;
1846 uint32_t handle;
1847 int ret;
1848
1849 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1850 file_priv ? &handle : NULL);
1851 if (buf)
1852 vmw_fence_single_bo(&buf->base, fence);
1853 if (file_priv)
1854 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1855 ret, user_fence_rep, fence,
1856 handle);
1857 if (out_fence)
1858 *out_fence = fence;
1859 else
1860 vmw_fence_obj_unreference(&fence);
1861
1862 vmw_kms_helper_buffer_revert(buf);
1863}
1864
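A sketch of the intended call sequence around these helpers; vmw_example_buffer_submit() and vmw_example_emit_commands() are hypothetical stand-ins for a real command-submission path:

/* Hypothetical command emission; stands in for a real fifo/execbuf step. */
static int vmw_example_emit_commands(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf)
{
	return 0;
}

static int vmw_example_buffer_submit(struct vmw_private *dev_priv,
				     struct drm_file *file_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_fence_rep __user *user_fence_rep)
{
	int ret;

	/* Reserve and validate (as a GMR here, not a MOB). */
	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = vmw_example_emit_commands(dev_priv, buf);
	if (ret) {
		/* Undo the prepare on error. */
		vmw_kms_helper_buffer_revert(buf);
		return ret;
	}

	/* Fences the buffer, copies fence data to user space, unreserves. */
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);
	return 0;
}
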
1865
1866/**
1867 * vmw_kms_helper_resource_revert - Undo the actions of
1868 * vmw_kms_helper_resource_prepare.
1869 *
1870 * @res: Pointer to the resource. Typically a surface.
1871 *
1872 * Helper to be used if an error forces the caller to undo the actions of
1873 * vmw_kms_helper_resource_prepare.
1874 */
1875void vmw_kms_helper_resource_revert(struct vmw_resource *res)
1876{
1877 vmw_kms_helper_buffer_revert(res->backup);
1878 vmw_resource_unreserve(res, false, NULL, 0);
1879 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1880}
1881
1882/**
1883 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
1884 * command submission.
1885 *
1886 * @res: Pointer to the resource. Typically a surface.
1887 * @interruptible: Whether to perform waits as interruptible.
1888 *
1889 * Also reserves and validates the backup buffer if the resource is guest-backed.
1890 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1891 * interrupted by a signal.
1892 */
1893int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1894 bool interruptible)
1895{
1896 int ret = 0;
1897
1898 if (interruptible)
1899 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
1900 else
1901 mutex_lock(&res->dev_priv->cmdbuf_mutex);
1902
1903 if (unlikely(ret != 0))
1904 return -ERESTARTSYS;
1905
1906 ret = vmw_resource_reserve(res, interruptible, false);
1907 if (ret)
1908 goto out_unlock;
1909
1910 if (res->backup) {
1911 ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
1912 interruptible,
1913 res->dev_priv->has_mob);
1914 if (ret)
1915 goto out_unreserve;
1916 }
1917 ret = vmw_resource_validate(res);
1918 if (ret)
1919 goto out_revert;
1920 return 0;
1921
1922out_revert:
1923 vmw_kms_helper_buffer_revert(res->backup);
1924out_unreserve:
1925 vmw_resource_unreserve(res, false, NULL, 0);
1926out_unlock:
1927 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1928 return ret;
1929}
1930
1931/**
1932 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
1933 * kms command submission.
1934 *
1935 * @res: Pointer to the resource. Typically a surface.
1936 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1937 * ref-counted fence pointer is returned here.
1938 */
1939void vmw_kms_helper_resource_finish(struct vmw_resource *res,
1940 struct vmw_fence_obj **out_fence)
1941{
1942 if (res->backup || out_fence)
1943 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
1944 out_fence, NULL);
1945
1946 vmw_resource_unreserve(res, false, NULL, 0);
1947 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1948}
1949
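The resource variant pairs the same way. A minimal sketch (vmw_example_surface_flush() is hypothetical) combining it with vmw_kms_helper_dirty():

static int vmw_example_surface_flush(struct vmw_private *dev_priv,
				     struct vmw_framebuffer *framebuffer,
				     struct vmw_resource *res,
				     const struct drm_clip_rect *clips,
				     int num_clips,
				     struct vmw_kms_dirty *dirty)
{
	int ret;

	/* Reserve and validate the surface and its backup MOB. */
	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				   0, 0, num_clips, 1, dirty);
	if (ret) {
		vmw_kms_helper_resource_revert(res);
		return ret;
	}

	/* Fence the backup (if any) and unreserve. */
	vmw_kms_helper_resource_finish(res, NULL);
	return 0;
}
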
1950/**
1951 * vmw_kms_update_proxy - Helper function to update a proxy surface from
1952 * its backing MOB.
1953 *
1954 * @res: Pointer to the surface resource
1955 * @clips: Clip rects in framebuffer (surface) space.
1956 * @num_clips: Number of clips in @clips.
1957 * @increment: Integer with which to increment the clip counter when looping.
1958 * Used to skip a predetermined number of clip rects.
1959 *
1960 * This function makes sure the proxy surface is updated from its backing MOB
1961 * using the region given by @clips. The surface resource @res and its backing
1962 * MOB needs to be reserved and validated on call.
1963 */
1964int vmw_kms_update_proxy(struct vmw_resource *res,
1965 const struct drm_clip_rect *clips,
1966 unsigned num_clips,
1967 int increment)
1968{
1969 struct vmw_private *dev_priv = res->dev_priv;
1970 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
1971 struct {
1972 SVGA3dCmdHeader header;
1973 SVGA3dCmdUpdateGBImage body;
1974 } *cmd;
1975 SVGA3dBox *box;
1976 size_t copy_size = 0;
1977 int i;
1978
1979 if (!clips)
1980 return 0;
1981
1982 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
1983 if (!cmd) {
1984 DRM_ERROR("Couldn't reserve fifo space for proxy surface "
1985 "update.\n");
1986 return -ENOMEM;
1987 }
1988
1989 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
1990 box = &cmd->body.box;
1991
1992 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
1993 cmd->header.size = sizeof(cmd->body);
1994 cmd->body.image.sid = res->id;
1995 cmd->body.image.face = 0;
1996 cmd->body.image.mipmap = 0;
1997
1998 if (clips->x1 > size->width || clips->x2 > size->width ||
1999 clips->y1 > size->height || clips->y2 > size->height) {
2000 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2001 return -EINVAL;
2002 }
2003
2004 box->x = clips->x1;
2005 box->y = clips->y1;
2006 box->z = 0;
2007 box->w = clips->x2 - clips->x1;
2008 box->h = clips->y2 - clips->y1;
2009 box->d = 1;
2010
2011 copy_size += sizeof(*cmd);
2012 }
2013
2014 vmw_fifo_commit(dev_priv, copy_size);
2015
2016 return 0;
2017}
2018
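Tying this back to vmw_create_dmabuf_proxy() earlier in the diff, a hypothetical helper that keeps a proxy-surface framebuffer coherent before a surface dirty pass; it assumes the resource was already prepared with vmw_kms_helper_resource_prepare():

static int vmw_example_flush_proxy(struct vmw_framebuffer_surface *vfbs,
				   const struct drm_clip_rect *clips,
				   unsigned num_clips, int increment)
{
	struct vmw_resource *res = &vfbs->surface->res;

	if (!vfbs->is_dmabuf_proxy)
		return 0;

	/* Pull the dirty region from the backing MOB into the surface. */
	return vmw_kms_update_proxy(res, clips, num_clips, increment);
}
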
2019int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2020 unsigned unit,
2021 u32 max_width,
2022 u32 max_height,
2023 struct drm_connector **p_con,
2024 struct drm_crtc **p_crtc,
2025 struct drm_display_mode **p_mode)
2026{
2027 struct drm_connector *con;
2028 struct vmw_display_unit *du;
2029 struct drm_display_mode *mode;
2030 int i = 0;
2031
2032 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2033 head) {
2034 if (i == unit)
2035 break;
2036
2037 ++i;
2038 }
2039
2040 if (i != unit) {
2041 DRM_ERROR("Could not find initial display unit.\n");
2042 return -EINVAL;
2043 }
2044
2045 if (list_empty(&con->modes))
2046 (void) vmw_du_connector_fill_modes(con, max_width, max_height);
2047
2048 if (list_empty(&con->modes)) {
2049 DRM_ERROR("Could not find initial display mode.\n");
2050 return -EINVAL;
2051 }
2052
2053 du = vmw_connector_to_du(con);
2054 *p_con = con;
2055 *p_crtc = &du->crtc;
2056
2057 list_for_each_entry(mode, &con->modes, head) {
2058 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2059 break;
2060 }
2061
2062 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2063 *p_mode = mode;
2064 else {
2065 WARN_ONCE(true, "Could not find initial preferred mode.\n");
2066 *p_mode = list_first_entry(&con->modes,
2067 struct drm_display_mode,
2068 head);
2069 }
2070
2071 return 0;
2072}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index f1a324cfb4c3..782df7ca9794 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
32#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
33#include "vmwgfx_drv.h" 33#include "vmwgfx_drv.h"
34 34
35/**
36 * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
37 * function.
38 *
39 * @fifo_commit: Callback that is called once for each display unit after
40 * all clip rects. This function must commit the fifo space reserved by the
41 * helper. Set up by the caller.
42 * @clip: Callback that is called for each cliprect on each display unit.
43 * Set up by the caller.
44 * @fifo_reserve_size: Fifo size that the helper should try to allocate for
45 * each display unit. Set up by the caller.
46 * @dev_priv: Pointer to the device private. Set up by the helper.
47 * @unit: The current display unit. Set up by the helper before a call to @clip.
48 * @cmd: The allocated fifo space. Set up by the helper before the first @clip
49 * call.
50 * @num_hits: Number of clip rect commands for this display unit.
51 * Cleared by the helper before the first @clip call. Updated by the @clip
52 * callback.
53 * @fb_x: Clip rect left side in framebuffer coordinates.
54 * @fb_y: Clip rect top side in framebuffer coordinates.
55 * @unit_x1: Clip rect left side in crtc coordinates.
56 * @unit_y1: Clip rect top side in crtc coordinates.
57 * @unit_x2: Clip rect right side in crtc coordinates.
58 * @unit_y2: Clip rect bottom side in crtc coordinates.
59 *
60 * The clip rect coordinates are updated by the helper for each @clip call.
61 * Note that this structure may be embedded in a larger one (derived from)
62 * if more info needs to be passed between helper caller and helper callbacks.
63 */
64struct vmw_kms_dirty {
65 void (*fifo_commit)(struct vmw_kms_dirty *);
66 void (*clip)(struct vmw_kms_dirty *);
67 size_t fifo_reserve_size;
68 struct vmw_private *dev_priv;
69 struct vmw_display_unit *unit;
70 void *cmd;
71 u32 num_hits;
72 s32 fb_x;
73 s32 fb_y;
74 s32 unit_x1;
75 s32 unit_y1;
76 s32 unit_x2;
77 s32 unit_y2;
78};
79
35#define VMWGFX_NUM_DISPLAY_UNITS 8 80#define VMWGFX_NUM_DISPLAY_UNITS 8
36 81
37 82
38#define vmw_framebuffer_to_vfb(x) \ 83#define vmw_framebuffer_to_vfb(x) \
39 container_of(x, struct vmw_framebuffer, base) 84 container_of(x, struct vmw_framebuffer, base)
85#define vmw_framebuffer_to_vfbs(x) \
86 container_of(x, struct vmw_framebuffer_surface, base.base)
87#define vmw_framebuffer_to_vfbd(x) \
88 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
40 89
41/** 90/**
42 * Base class for framebuffers 91 * Base class for framebuffers
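The vmw_kms_dirty closure documented above is meant to be embedded: vmw_kms_helper_dirty() (declared later in this header) fills in the coordinate fields and calls @clip once per clip rect per display unit, then @fifo_commit once per unit. A minimal sketch of the derivation pattern, assuming the bounding-box fields are pre-initialized by the caller; my_kms_dirty, my_clip and my_commit are hypothetical names (struct vmw_kms_sou_surface_dirty in vmwgfx_scrn.c further down this patch is a real user):

/* Hypothetical derived closure; only the names are invented here. */
struct my_kms_dirty {
        struct vmw_kms_dirty base;     /* container_of() recovers the wrapper */
        s32 left, right, top, bottom;  /* bounding box, initialized by caller */
};

static void my_clip(struct vmw_kms_dirty *dirty)
{
        struct my_kms_dirty *mdirty =
                container_of(dirty, struct my_kms_dirty, base);

        /* The coordinates are set up by the helper before each call. */
        mdirty->left = min_t(s32, mdirty->left, dirty->unit_x1);
        mdirty->top = min_t(s32, mdirty->top, dirty->unit_y1);
        mdirty->right = max_t(s32, mdirty->right, dirty->unit_x2);
        mdirty->bottom = max_t(s32, mdirty->bottom, dirty->unit_y2);
        dirty->num_hits++;
}

static void my_commit(struct vmw_kms_dirty *dirty)
{
        /* Fill the space reserved in dirty->cmd, then commit it. */
}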
@@ -53,9 +102,27 @@ struct vmw_framebuffer {
53 uint32_t user_handle; 102 uint32_t user_handle;
54}; 103};
55 104
105/*
106 * Clip rectangle
107 */
108struct vmw_clip_rect {
109 int x1, x2, y1, y2;
110};
111
112struct vmw_framebuffer_surface {
113 struct vmw_framebuffer base;
114 struct vmw_surface *surface;
115 struct vmw_dma_buffer *buffer;
116 struct list_head head;
 117 bool is_dmabuf_proxy; /* true if this is a proxy surface for a DMA buffer */
118};
119
120
121struct vmw_framebuffer_dmabuf {
122 struct vmw_framebuffer base;
123 struct vmw_dma_buffer *buffer;
124};
56 125
57#define vmw_crtc_to_du(x) \
58 container_of(x, struct vmw_display_unit, crtc)
59 126
60/* 127/*
61 * Basic cursor manipulation 128 * Basic cursor manipulation
@@ -120,11 +187,7 @@ struct vmw_display_unit {
120/* 187/*
121 * Shared display unit functions - vmwgfx_kms.c 188 * Shared display unit functions - vmwgfx_kms.c
122 */ 189 */
123void vmw_display_unit_cleanup(struct vmw_display_unit *du); 190void vmw_du_cleanup(struct vmw_display_unit *du);
124int vmw_du_page_flip(struct drm_crtc *crtc,
125 struct drm_framebuffer *fb,
126 struct drm_pending_vblank_event *event,
127 uint32_t page_flip_flags);
128void vmw_du_crtc_save(struct drm_crtc *crtc); 191void vmw_du_crtc_save(struct drm_crtc *crtc);
129void vmw_du_crtc_restore(struct drm_crtc *crtc); 192void vmw_du_crtc_restore(struct drm_crtc *crtc);
130void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 193void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
143int vmw_du_connector_set_property(struct drm_connector *connector, 206int vmw_du_connector_set_property(struct drm_connector *connector,
144 struct drm_property *property, 207 struct drm_property *property,
145 uint64_t val); 208 uint64_t val);
209int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
210 struct vmw_framebuffer *framebuffer,
211 const struct drm_clip_rect *clips,
212 const struct drm_vmw_rect *vclips,
213 s32 dest_x, s32 dest_y,
214 int num_clips,
215 int increment,
216 struct vmw_kms_dirty *dirty);
146 217
218int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
219 struct vmw_dma_buffer *buf,
220 bool interruptible,
221 bool validate_as_mob);
222void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
223void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
224 struct drm_file *file_priv,
225 struct vmw_dma_buffer *buf,
226 struct vmw_fence_obj **out_fence,
227 struct drm_vmw_fence_rep __user *
228 user_fence_rep);
229int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
230 bool interruptible);
231void vmw_kms_helper_resource_revert(struct vmw_resource *res);
232void vmw_kms_helper_resource_finish(struct vmw_resource *res,
233 struct vmw_fence_obj **out_fence);
234int vmw_kms_readback(struct vmw_private *dev_priv,
235 struct drm_file *file_priv,
236 struct vmw_framebuffer *vfb,
237 struct drm_vmw_fence_rep __user *user_fence_rep,
238 struct drm_vmw_rect *vclips,
239 uint32_t num_clips);
240struct vmw_framebuffer *
241vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
242 struct vmw_dma_buffer *dmabuf,
243 struct vmw_surface *surface,
244 bool only_2d,
245 const struct drm_mode_fb_cmd *mode_cmd);
246int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
247 unsigned unit,
248 u32 max_width,
249 u32 max_height,
250 struct drm_connector **p_con,
251 struct drm_crtc **p_crtc,
252 struct drm_display_mode **p_mode);
253void vmw_guess_mode_timing(struct drm_display_mode *mode);
147 254
148/* 255/*
149 * Legacy display unit functions - vmwgfx_ldu.c 256 * Legacy display unit functions - vmwgfx_ldu.c
150 */ 257 */
151int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); 258int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
152int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); 259int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
260int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
261 struct vmw_framebuffer *framebuffer,
262 unsigned flags, unsigned color,
263 struct drm_clip_rect *clips,
264 unsigned num_clips, int increment);
265int vmw_kms_update_proxy(struct vmw_resource *res,
266 const struct drm_clip_rect *clips,
267 unsigned num_clips,
268 int increment);
153 269
154/* 270/*
155 * Screen Objects display functions - vmwgfx_scrn.c 271 * Screen Objects display functions - vmwgfx_scrn.c
156 */ 272 */
157int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); 273int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
158int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); 274int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
159int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, 275int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
160 struct drm_vmw_rect *rects); 276 struct vmw_framebuffer *framebuffer,
161bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, 277 struct drm_clip_rect *clips,
162 struct drm_crtc *crtc); 278 struct drm_vmw_rect *vclips,
163void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, 279 struct vmw_resource *srf,
164 struct drm_crtc *crtc); 280 s32 dest_x,
281 s32 dest_y,
282 unsigned num_clips, int inc,
283 struct vmw_fence_obj **out_fence);
284int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
285 struct vmw_framebuffer *framebuffer,
286 struct drm_clip_rect *clips,
287 unsigned num_clips, int increment,
288 bool interruptible,
289 struct vmw_fence_obj **out_fence);
290int vmw_kms_sou_readback(struct vmw_private *dev_priv,
291 struct drm_file *file_priv,
292 struct vmw_framebuffer *vfb,
293 struct drm_vmw_fence_rep __user *user_fence_rep,
294 struct drm_vmw_rect *vclips,
295 uint32_t num_clips);
296
297/*
298 * Screen Target Display Unit functions - vmwgfx_stdu.c
299 */
300int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
301int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
302int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
303 struct vmw_framebuffer *framebuffer,
304 struct drm_clip_rect *clips,
305 struct drm_vmw_rect *vclips,
306 struct vmw_resource *srf,
307 s32 dest_x,
308 s32 dest_y,
309 unsigned num_clips, int inc,
310 struct vmw_fence_obj **out_fence);
311int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
312 struct drm_file *file_priv,
313 struct vmw_framebuffer *vfb,
314 struct drm_vmw_fence_rep __user *user_fence_rep,
315 struct drm_clip_rect *clips,
316 struct drm_vmw_rect *vclips,
317 uint32_t num_clips,
318 int increment,
319 bool to_surface,
320 bool interruptible);
165 321
166 322
167#endif 323#endif
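The vmw_kms_helper_buffer_{prepare,revert,finish} trio declared in this header follows a reserve/commit discipline: prepare reserves and validates the buffer before command submission, revert backs out if submission fails, and finish fences the buffer and optionally hands the fence back. A hedged sketch of the expected sequence; submit_dirty_commands() is a hypothetical stand-in for the actual FIFO work:

static int submit_dirty_commands(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *vfb); /* hypothetical */

static int my_dmabuf_dirty(struct vmw_private *dev_priv,
                           struct vmw_framebuffer *vfb,
                           struct vmw_dma_buffer *buf)
{
        struct vmw_fence_obj *fence = NULL;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
        if (ret)
                return ret;

        ret = submit_dirty_commands(dev_priv, vfb);
        if (ret) {
                vmw_kms_helper_buffer_revert(buf);
                return ret;
        }

        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, &fence, NULL);
        if (fence)
                vmw_fence_obj_unreference(&fence);
        return 0;
}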
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f748ab4..bb63e4d795fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
57static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) 57static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
58{ 58{
59 list_del_init(&ldu->active); 59 list_del_init(&ldu->active);
60 vmw_display_unit_cleanup(&ldu->base); 60 vmw_du_cleanup(&ldu->base);
61 kfree(ldu); 61 kfree(ldu);
62} 62}
63 63
@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
279 return -EINVAL; 279 return -EINVAL;
280 } 280 }
281 281
282 vmw_fb_off(dev_priv); 282 vmw_svga_enable(dev_priv);
283 283
284 crtc->primary->fb = fb; 284 crtc->primary->fb = fb;
285 encoder->crtc = crtc; 285 encoder->crtc = crtc;
@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
385 return 0; 385 return 0;
386} 386}
387 387
388int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 388int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
389{ 389{
390 struct drm_device *dev = dev_priv->dev; 390 struct drm_device *dev = dev_priv->dev;
391 int i, ret; 391 int i, ret;
@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
422 else 422 else
423 vmw_ldu_init(dev_priv, 0); 423 vmw_ldu_init(dev_priv, 0);
424 424
425 dev_priv->active_display_unit = vmw_du_legacy;
426
427 DRM_INFO("Legacy Display Unit initialized\n");
428
425 return 0; 429 return 0;
426 430
427err_vblank_cleanup: 431err_vblank_cleanup:
@@ -432,7 +436,7 @@ err_free:
432 return ret; 436 return ret;
433} 437}
434 438
435int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 439int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
436{ 440{
437 struct drm_device *dev = dev_priv->dev; 441 struct drm_device *dev = dev_priv->dev;
438 442
@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
447 451
448 return 0; 452 return 0;
449} 453}
454
455
456int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
457 struct vmw_framebuffer *framebuffer,
458 unsigned flags, unsigned color,
459 struct drm_clip_rect *clips,
460 unsigned num_clips, int increment)
461{
462 size_t fifo_size;
463 int i;
464
465 struct {
466 uint32_t header;
467 SVGAFifoCmdUpdate body;
468 } *cmd;
469
470 fifo_size = sizeof(*cmd) * num_clips;
471 cmd = vmw_fifo_reserve(dev_priv, fifo_size);
472 if (unlikely(cmd == NULL)) {
473 DRM_ERROR("Fifo reserve failed.\n");
474 return -ENOMEM;
475 }
476
477 memset(cmd, 0, fifo_size);
478 for (i = 0; i < num_clips; i++, clips += increment) {
479 cmd[i].header = SVGA_CMD_UPDATE;
480 cmd[i].body.x = clips->x1;
481 cmd[i].body.y = clips->y1;
482 cmd[i].body.width = clips->x2 - clips->x1;
483 cmd[i].body.height = clips->y2 - clips->y1;
484 }
485
486 vmw_fifo_commit(dev_priv, fifo_size);
487 return 0;
488}
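A note on the increment parameter of vmw_kms_ldu_do_dmabuf_dirty() above: the loop steps clips by increment rather than by one, so a caller can pass an interleaved rect array and have only every Nth entry consumed. A sketch of the assumed use, based on the DRM convention that DRM_MODE_FB_DIRTY_ANNOTATE_COPY delivers (src, dst) rect pairs; my_fb_dirty() is a hypothetical caller:

static int my_fb_dirty(struct vmw_private *dev_priv,
                       struct vmw_framebuffer *vfb,
                       unsigned flags, unsigned color,
                       struct drm_clip_rect *clips, unsigned num_clips)
{
        /* With ANNOTATE_COPY only every second rect is a dirty region. */
        int increment = (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) ? 2 : 1;

        return vmw_kms_ldu_do_dmabuf_dirty(dev_priv, vfb, flags, color,
                                           clips, num_clips / increment,
                                           increment);
}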
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8cd3cd..23db16008e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
31 * If we set up the screen target otable, screen objects stop working. 31 * If we set up the screen target otable, screen objects stop working.
32 */ 32 */
33 33
34#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) 34#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
35 35
36#ifdef CONFIG_64BIT 36#ifdef CONFIG_64BIT
37#define VMW_PPN_SIZE 8 37#define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@ struct vmw_mob {
67 * @size: Size of the table (page-aligned). 67 * @size: Size of the table (page-aligned).
68 * @page_table: Pointer to a struct vmw_mob holding the page table. 68 * @page_table: Pointer to a struct vmw_mob holding the page table.
69 */ 69 */
70struct vmw_otable { 70static const struct vmw_otable pre_dx_tables[] = {
71 unsigned long size; 71 {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
72 struct vmw_mob *page_table; 72 {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
73 {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
74 {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
75 {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
76 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
77};
78
79static const struct vmw_otable dx_tables[] = {
80 {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
81 {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
82 {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
83 {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
84 {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
85 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
86 {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
73}; 87};
74 88
75static int vmw_mob_pt_populate(struct vmw_private *dev_priv, 89static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
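Neither struct vmw_otable's new shape nor the batch container is visible in this hunk. Reconstructing from the {size, page_table, enabled} initializers above and the batch->otables / batch->num_otables / batch->otable_bo uses below, the layouts are presumably along these lines (an inference from this patch, not a quote of the header):

struct vmw_otable {
        unsigned long size;
        struct vmw_mob *page_table;
        bool enabled;
};

struct vmw_otable_batch {
        unsigned num_otables;
        struct vmw_otable *otables;
        struct ttm_buffer_object *otable_bo;
};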
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
92 */ 106 */
93static int vmw_setup_otable_base(struct vmw_private *dev_priv, 107static int vmw_setup_otable_base(struct vmw_private *dev_priv,
94 SVGAOTableType type, 108 SVGAOTableType type,
109 struct ttm_buffer_object *otable_bo,
95 unsigned long offset, 110 unsigned long offset,
96 struct vmw_otable *otable) 111 struct vmw_otable *otable)
97{ 112{
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
106 121
107 BUG_ON(otable->page_table != NULL); 122 BUG_ON(otable->page_table != NULL);
108 123
109 vsgt = vmw_bo_sg_table(dev_priv->otable_bo); 124 vsgt = vmw_bo_sg_table(otable_bo);
110 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); 125 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
111 WARN_ON(!vmw_piter_next(&iter)); 126 WARN_ON(!vmw_piter_next(&iter));
112 127
@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
142 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; 157 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
143 cmd->header.size = sizeof(cmd->body); 158 cmd->header.size = sizeof(cmd->body);
144 cmd->body.type = type; 159 cmd->body.type = type;
145 cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); 160 cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
146 cmd->body.sizeInBytes = otable->size; 161 cmd->body.sizeInBytes = otable->size;
147 cmd->body.validSizeInBytes = 0; 162 cmd->body.validSizeInBytes = 0;
148 cmd->body.ptDepth = mob->pt_level; 163 cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
191 if (unlikely(cmd == NULL)) { 206 if (unlikely(cmd == NULL)) {
192 DRM_ERROR("Failed reserving FIFO space for OTable " 207 DRM_ERROR("Failed reserving FIFO space for OTable "
193 "takedown.\n"); 208 "takedown.\n");
194 } else { 209 return;
195 memset(cmd, 0, sizeof(*cmd));
196 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
197 cmd->header.size = sizeof(cmd->body);
198 cmd->body.type = type;
199 cmd->body.baseAddress = 0;
200 cmd->body.sizeInBytes = 0;
201 cmd->body.validSizeInBytes = 0;
202 cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
203 vmw_fifo_commit(dev_priv, sizeof(*cmd));
204 } 210 }
205 211
212 memset(cmd, 0, sizeof(*cmd));
213 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
214 cmd->header.size = sizeof(cmd->body);
215 cmd->body.type = type;
216 cmd->body.baseAddress = 0;
217 cmd->body.sizeInBytes = 0;
218 cmd->body.validSizeInBytes = 0;
219 cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
220 vmw_fifo_commit(dev_priv, sizeof(*cmd));
221
206 if (bo) { 222 if (bo) {
207 int ret; 223 int ret;
208 224
@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
217 otable->page_table = NULL; 233 otable->page_table = NULL;
218} 234}
219 235
220/* 236
221 * vmw_otables_setup - Set up guest backed memory object tables 237static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
222 * 238 struct vmw_otable_batch *batch)
223 * @dev_priv: Pointer to a device private structure
224 *
225 * Takes care of the device guest backed surface
226 * initialization, by setting up the guest backed memory object tables.
227 * Returns 0 on success and various error codes on failure. A succesful return
228 * means the object tables can be taken down using the vmw_otables_takedown
229 * function.
230 */
231int vmw_otables_setup(struct vmw_private *dev_priv)
232{ 239{
233 unsigned long offset; 240 unsigned long offset;
234 unsigned long bo_size; 241 unsigned long bo_size;
235 struct vmw_otable *otables; 242 struct vmw_otable *otables = batch->otables;
236 SVGAOTableType i; 243 SVGAOTableType i;
237 int ret; 244 int ret;
238 245
239 otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
240 GFP_KERNEL);
241 if (unlikely(otables == NULL)) {
242 DRM_ERROR("Failed to allocate space for otable "
243 "metadata.\n");
244 return -ENOMEM;
245 }
246
247 otables[SVGA_OTABLE_MOB].size =
248 VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
249 otables[SVGA_OTABLE_SURFACE].size =
250 VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
251 otables[SVGA_OTABLE_CONTEXT].size =
252 VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
253 otables[SVGA_OTABLE_SHADER].size =
254 VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
255 otables[SVGA_OTABLE_SCREEN_TARGET].size =
256 VMWGFX_NUM_GB_SCREEN_TARGET *
257 SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
258
259 bo_size = 0; 246 bo_size = 0;
260 for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { 247 for (i = 0; i < batch->num_otables; ++i) {
248 if (!otables[i].enabled)
249 continue;
250
261 otables[i].size = 251 otables[i].size =
262 (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; 252 (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
263 bo_size += otables[i].size; 253 bo_size += otables[i].size;
@@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
267 ttm_bo_type_device, 257 ttm_bo_type_device,
268 &vmw_sys_ne_placement, 258 &vmw_sys_ne_placement,
269 0, false, NULL, 259 0, false, NULL,
270 &dev_priv->otable_bo); 260 &batch->otable_bo);
271 261
272 if (unlikely(ret != 0)) 262 if (unlikely(ret != 0))
273 goto out_no_bo; 263 goto out_no_bo;
274 264
275 ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); 265 ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
276 BUG_ON(ret != 0); 266 BUG_ON(ret != 0);
277 ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); 267 ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
278 if (unlikely(ret != 0)) 268 if (unlikely(ret != 0))
279 goto out_unreserve; 269 goto out_unreserve;
280 ret = vmw_bo_map_dma(dev_priv->otable_bo); 270 ret = vmw_bo_map_dma(batch->otable_bo);
281 if (unlikely(ret != 0)) 271 if (unlikely(ret != 0))
282 goto out_unreserve; 272 goto out_unreserve;
283 273
284 ttm_bo_unreserve(dev_priv->otable_bo); 274 ttm_bo_unreserve(batch->otable_bo);
285 275
286 offset = 0; 276 offset = 0;
287 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { 277 for (i = 0; i < batch->num_otables; ++i) {
288 ret = vmw_setup_otable_base(dev_priv, i, offset, 278 if (!batch->otables[i].enabled)
279 continue;
280
281 ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
282 offset,
289 &otables[i]); 283 &otables[i]);
290 if (unlikely(ret != 0)) 284 if (unlikely(ret != 0))
291 goto out_no_setup; 285 goto out_no_setup;
292 offset += otables[i].size; 286 offset += otables[i].size;
293 } 287 }
294 288
295 dev_priv->otables = otables;
296 return 0; 289 return 0;
297 290
298out_unreserve: 291out_unreserve:
299 ttm_bo_unreserve(dev_priv->otable_bo); 292 ttm_bo_unreserve(batch->otable_bo);
300out_no_setup: 293out_no_setup:
301 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 294 for (i = 0; i < batch->num_otables; ++i) {
302 vmw_takedown_otable_base(dev_priv, i, &otables[i]); 295 if (batch->otables[i].enabled)
296 vmw_takedown_otable_base(dev_priv, i,
297 &batch->otables[i]);
298 }
303 299
304 ttm_bo_unref(&dev_priv->otable_bo); 300 ttm_bo_unref(&batch->otable_bo);
305out_no_bo: 301out_no_bo:
306 kfree(otables);
307 return ret; 302 return ret;
308} 303}
309 304
310
311/* 305/*
312 * vmw_otables_takedown - Take down guest backed memory object tables 306 * vmw_otables_setup - Set up guest backed memory object tables
313 * 307 *
314 * @dev_priv: Pointer to a device private structure 308 * @dev_priv: Pointer to a device private structure
315 * 309 *
316 * Take down the Guest Memory Object tables. 310 * Takes care of the device guest backed surface
311 * initialization, by setting up the guest backed memory object tables.
312 * Returns 0 on success and various error codes on failure. A successful return
313 * means the object tables can be taken down using the vmw_otables_takedown
314 * function.
317 */ 315 */
318void vmw_otables_takedown(struct vmw_private *dev_priv) 316int vmw_otables_setup(struct vmw_private *dev_priv)
317{
318 struct vmw_otable **otables = &dev_priv->otable_batch.otables;
319 int ret;
320
321 if (dev_priv->has_dx) {
322 *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
323 if (*otables == NULL)
324 return -ENOMEM;
325
326 memcpy(*otables, dx_tables, sizeof(dx_tables));
327 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
328 } else {
329 *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
330 if (*otables == NULL)
331 return -ENOMEM;
332
333 memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
334 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
335 }
336
337 ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
338 if (unlikely(ret != 0))
339 goto out_setup;
340
341 return 0;
342
343out_setup:
344 kfree(*otables);
345 return ret;
346}
347
348static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
349 struct vmw_otable_batch *batch)
319{ 350{
320 SVGAOTableType i; 351 SVGAOTableType i;
321 struct ttm_buffer_object *bo = dev_priv->otable_bo; 352 struct ttm_buffer_object *bo = batch->otable_bo;
322 int ret; 353 int ret;
323 354
324 for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 355 for (i = 0; i < batch->num_otables; ++i)
325 vmw_takedown_otable_base(dev_priv, i, 356 if (batch->otables[i].enabled)
326 &dev_priv->otables[i]); 357 vmw_takedown_otable_base(dev_priv, i,
358 &batch->otables[i]);
327 359
328 ret = ttm_bo_reserve(bo, false, true, false, NULL); 360 ret = ttm_bo_reserve(bo, false, true, false, NULL);
329 BUG_ON(ret != 0); 361 BUG_ON(ret != 0);
@@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
331 vmw_fence_single_bo(bo, NULL); 363 vmw_fence_single_bo(bo, NULL);
332 ttm_bo_unreserve(bo); 364 ttm_bo_unreserve(bo);
333 365
334 ttm_bo_unref(&dev_priv->otable_bo); 366 ttm_bo_unref(&batch->otable_bo);
335 kfree(dev_priv->otables);
336 dev_priv->otables = NULL;
337} 367}
338 368
369/*
370 * vmw_otables_takedown - Take down guest backed memory object tables
371 *
372 * @dev_priv: Pointer to a device private structure
373 *
374 * Take down the Guest Memory Object tables.
375 */
376void vmw_otables_takedown(struct vmw_private *dev_priv)
377{
378 vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
379 kfree(dev_priv->otable_batch.otables);
380}
339 381
340/* 382/*
341 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages 383 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
409 goto out_unreserve; 451 goto out_unreserve;
410 452
411 ttm_bo_unreserve(mob->pt_bo); 453 ttm_bo_unreserve(mob->pt_bo);
412 454
413 return 0; 455 return 0;
414 456
415out_unreserve: 457out_unreserve:
@@ -429,15 +471,15 @@ out_unreserve:
429 * *@addr according to the page table entry size. 471 * *@addr according to the page table entry size.
430 */ 472 */
431#if (VMW_PPN_SIZE == 8) 473#if (VMW_PPN_SIZE == 8)
432static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) 474static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
433{ 475{
434 *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); 476 *((u64 *) *addr) = val >> PAGE_SHIFT;
435 *addr += 2; 477 *addr += 2;
436} 478}
437#else 479#else
438static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) 480static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
439{ 481{
440 *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); 482 *(*addr)++ = val >> PAGE_SHIFT;
441} 483}
442#endif 484#endif
443 485
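The sizing in vmw_mob_build_pt() in the next hunk is easy to sanity-check with concrete numbers: each page-table page holds PAGE_SIZE / VMW_PPN_SIZE entries, i.e. 512 data-page references with the 8-byte PPNs used on 64-bit kernels (see VMW_PPN_SIZE above). A worked example, assuming PAGE_SIZE == 4096:

/* 1000 data pages need 8000 bytes of PPNs -> 2 page-table pages. */
unsigned long num_data_pages = 1000;
unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;         /* 8000 */
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); /* 2 */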
@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
459 unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; 501 unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
460 unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); 502 unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
461 unsigned long pt_page; 503 unsigned long pt_page;
462 __le32 *addr, *save_addr; 504 u32 *addr, *save_addr;
463 unsigned long i; 505 unsigned long i;
464 struct page *page; 506 struct page *page;
465 507
@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
574 vmw_fence_single_bo(bo, NULL); 616 vmw_fence_single_bo(bo, NULL);
575 ttm_bo_unreserve(bo); 617 ttm_bo_unreserve(bo);
576 } 618 }
577 vmw_3d_resource_dec(dev_priv, false); 619 vmw_fifo_resource_dec(dev_priv);
578} 620}
579 621
580/* 622/*
@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
627 mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; 669 mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
628 } 670 }
629 671
630 (void) vmw_3d_resource_inc(dev_priv, false); 672 vmw_fifo_resource_inc(dev_priv);
631 673
632 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 674 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
633 if (unlikely(cmd == NULL)) { 675 if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
640 cmd->header.size = sizeof(cmd->body); 682 cmd->header.size = sizeof(cmd->body);
641 cmd->body.mobid = mob_id; 683 cmd->body.mobid = mob_id;
642 cmd->body.ptDepth = mob->pt_level; 684 cmd->body.ptDepth = mob->pt_level;
643 cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); 685 cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
644 cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; 686 cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
645 687
646 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 688 vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
648 return 0; 690 return 0;
649 691
650out_no_cmd_space: 692out_no_cmd_space:
651 vmw_3d_resource_dec(dev_priv, false); 693 vmw_fifo_resource_dec(dev_priv);
652 if (pt_set_up) 694 if (pt_set_up)
653 ttm_bo_unref(&mob->pt_bo); 695 ttm_bo_unref(&mob->pt_bo);
654 696
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f68e9d0..76069f093ccf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
31 31
32#include <drm/ttm/ttm_placement.h> 32#include <drm/ttm/ttm_placement.h>
33 33
34#include "svga_overlay.h" 34#include "device_include/svga_overlay.h"
35#include "svga_escape.h" 35#include "device_include/svga_escape.h"
36 36
37#define VMW_MAX_NUM_STREAMS 1 37#define VMW_MAX_NUM_STREAMS 1
38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) 38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
100{ 100{
101 struct vmw_escape_video_flush *flush; 101 struct vmw_escape_video_flush *flush;
102 size_t fifo_size; 102 size_t fifo_size;
103 bool have_so = dev_priv->sou_priv ? true : false; 103 bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
104 int i, num_items; 104 int i, num_items;
105 SVGAGuestPtr ptr; 105 SVGAGuestPtr ptr;
106 106
@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
231 if (!pin) 231 if (!pin)
232 return vmw_dmabuf_unpin(dev_priv, buf, inter); 232 return vmw_dmabuf_unpin(dev_priv, buf, inter);
233 233
234 if (!dev_priv->sou_priv) 234 if (dev_priv->active_display_unit == vmw_du_legacy)
235 return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); 235 return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
236 236
237 return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); 237 return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
238} 238}
239 239
240/** 240/**
@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
453 453
454static bool vmw_overlay_available(const struct vmw_private *dev_priv) 454static bool vmw_overlay_available(const struct vmw_private *dev_priv)
455{ 455{
456 return (dev_priv->overlay_priv != NULL && 456 return (dev_priv->overlay_priv != NULL &&
457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == 457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
458 VMW_OVERLAY_CAP_MASK)); 458 VMW_OVERLAY_CAP_MASK));
459} 459}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..dce798053a96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
39#define VMWGFX_IRQSTATUS_PORT 0x8 39#define VMWGFX_IRQSTATUS_PORT 0x8
40 40
41struct svga_guest_mem_descriptor { 41struct svga_guest_mem_descriptor {
42 __le32 ppn; 42 u32 ppn;
43 __le32 num_pages; 43 u32 num_pages;
44}; 44};
45 45
46struct svga_fifo_cmd_fence { 46struct svga_fifo_cmd_fence {
47 __le32 fence; 47 u32 fence;
48}; 48};
49 49
50#define SVGA_SYNC_GENERIC 1 50#define SVGA_SYNC_GENERIC 1
51#define SVGA_SYNC_FIFOFULL 2 51#define SVGA_SYNC_FIFOFULL 2
52 52
53#include "svga_types.h" 53#include "device_include/svga3d_reg.h"
54
55#include "svga3d_reg.h"
56 54
57#endif 55#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include "vmwgfx_resource_priv.h" 33#include "vmwgfx_resource_priv.h"
34#include "vmwgfx_binding.h"
34 35
35#define VMW_RES_EVICT_ERR_COUNT 10 36#define VMW_RES_EVICT_ERR_COUNT 10
36 37
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
121 int id; 122 int id;
122 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 123 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
123 124
125 write_lock(&dev_priv->resource_lock);
124 res->avail = false; 126 res->avail = false;
125 list_del_init(&res->lru_head); 127 list_del_init(&res->lru_head);
126 write_unlock(&dev_priv->resource_lock); 128 write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
143 } 145 }
144 146
145 if (likely(res->hw_destroy != NULL)) { 147 if (likely(res->hw_destroy != NULL)) {
146 res->hw_destroy(res);
147 mutex_lock(&dev_priv->binding_mutex); 148 mutex_lock(&dev_priv->binding_mutex);
148 vmw_context_binding_res_list_kill(&res->binding_head); 149 vmw_binding_res_list_kill(&res->binding_head);
149 mutex_unlock(&dev_priv->binding_mutex); 150 mutex_unlock(&dev_priv->binding_mutex);
151 res->hw_destroy(res);
150 } 152 }
151 153
152 id = res->id; 154 id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
156 kfree(res); 158 kfree(res);
157 159
158 write_lock(&dev_priv->resource_lock); 160 write_lock(&dev_priv->resource_lock);
159
160 if (id != -1) 161 if (id != -1)
161 idr_remove(idr, id); 162 idr_remove(idr, id);
163 write_unlock(&dev_priv->resource_lock);
162} 164}
163 165
164void vmw_resource_unreference(struct vmw_resource **p_res) 166void vmw_resource_unreference(struct vmw_resource **p_res)
165{ 167{
166 struct vmw_resource *res = *p_res; 168 struct vmw_resource *res = *p_res;
167 struct vmw_private *dev_priv = res->dev_priv;
168 169
169 *p_res = NULL; 170 *p_res = NULL;
170 write_lock(&dev_priv->resource_lock);
171 kref_put(&res->kref, vmw_resource_release); 171 kref_put(&res->kref, vmw_resource_release);
172 write_unlock(&dev_priv->resource_lock);
173} 172}
174 173
175 174
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
260 write_unlock(&dev_priv->resource_lock); 259 write_unlock(&dev_priv->resource_lock);
261} 260}
262 261
263struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, 262static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
264 struct idr *idr, int id) 263 struct idr *idr, int id)
265{ 264{
266 struct vmw_resource *res; 265 struct vmw_resource *res;
267 266
268 read_lock(&dev_priv->resource_lock); 267 read_lock(&dev_priv->resource_lock);
269 res = idr_find(idr, id); 268 res = idr_find(idr, id);
270 if (res && res->avail) 269 if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
271 kref_get(&res->kref);
272 else
273 res = NULL; 270 res = NULL;
271
274 read_unlock(&dev_priv->resource_lock); 272 read_unlock(&dev_priv->resource_lock);
275 273
276 if (unlikely(res == NULL)) 274 if (unlikely(res == NULL))
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
900 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 898 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
901 vmw_user_stream_size, 899 vmw_user_stream_size,
902 false, true); 900 false, true);
901 ttm_read_unlock(&dev_priv->reservation_sem);
903 if (unlikely(ret != 0)) { 902 if (unlikely(ret != 0)) {
904 if (ret != -ERESTARTSYS) 903 if (ret != -ERESTARTSYS)
905 DRM_ERROR("Out of graphics memory for stream" 904 DRM_ERROR("Out of graphics memory for stream"
906 " creation.\n"); 905 " creation.\n");
907 goto out_unlock;
908 }
909 906
907 goto out_ret;
908 }
910 909
911 stream = kmalloc(sizeof(*stream), GFP_KERNEL); 910 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
912 if (unlikely(stream == NULL)) { 911 if (unlikely(stream == NULL)) {
913 ttm_mem_global_free(vmw_mem_glob(dev_priv), 912 ttm_mem_global_free(vmw_mem_glob(dev_priv),
914 vmw_user_stream_size); 913 vmw_user_stream_size);
915 ret = -ENOMEM; 914 ret = -ENOMEM;
916 goto out_unlock; 915 goto out_ret;
917 } 916 }
918 917
919 res = &stream->stream.res; 918 res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
926 925
927 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); 926 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
928 if (unlikely(ret != 0)) 927 if (unlikely(ret != 0))
929 goto out_unlock; 928 goto out_ret;
930 929
931 tmp = vmw_resource_reference(res); 930 tmp = vmw_resource_reference(res);
932 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, 931 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
940 arg->stream_id = res->id; 939 arg->stream_id = res->id;
941out_err: 940out_err:
942 vmw_resource_unreference(&res); 941 vmw_resource_unreference(&res);
943out_unlock: 942out_ret:
944 ttm_read_unlock(&dev_priv->reservation_sem);
945 return ret; 943 return ret;
946} 944}
947 945
@@ -1152,14 +1150,16 @@ out_bind_failed:
1152 * command submission. 1150 * command submission.
1153 * 1151 *
1154 * @res: Pointer to the struct vmw_resource to unreserve. 1152 * @res: Pointer to the struct vmw_resource to unreserve.
1153 * @switch_backup: Backup buffer has been switched.
1155 * @new_backup: Pointer to new backup buffer if command submission 1154 * @new_backup: Pointer to new backup buffer if command submission
1156 * switched. 1155 * switched. May be NULL.
1157 * @new_backup_offset: New backup offset if @new_backup is !NULL. 1156 * @new_backup_offset: New backup offset if @switch_backup is true.
1158 * 1157 *
1159 * Currently unreserving a resource means putting it back on the device's 1158 * Currently unreserving a resource means putting it back on the device's
1160 * resource lru list, so that it can be evicted if necessary. 1159 * resource lru list, so that it can be evicted if necessary.
1161 */ 1160 */
1162void vmw_resource_unreserve(struct vmw_resource *res, 1161void vmw_resource_unreserve(struct vmw_resource *res,
1162 bool switch_backup,
1163 struct vmw_dma_buffer *new_backup, 1163 struct vmw_dma_buffer *new_backup,
1164 unsigned long new_backup_offset) 1164 unsigned long new_backup_offset)
1165{ 1165{
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
1168 if (!list_empty(&res->lru_head)) 1168 if (!list_empty(&res->lru_head))
1169 return; 1169 return;
1170 1170
1171 if (new_backup && new_backup != res->backup) { 1171 if (switch_backup && new_backup != res->backup) {
1172
1173 if (res->backup) { 1172 if (res->backup) {
1174 lockdep_assert_held(&res->backup->base.resv->lock.base); 1173 lockdep_assert_held(&res->backup->base.resv->lock.base);
1175 list_del_init(&res->mob_head); 1174 list_del_init(&res->mob_head);
1176 vmw_dmabuf_unreference(&res->backup); 1175 vmw_dmabuf_unreference(&res->backup);
1177 } 1176 }
1178 1177
1179 res->backup = vmw_dmabuf_reference(new_backup); 1178 if (new_backup) {
1180 lockdep_assert_held(&new_backup->base.resv->lock.base); 1179 res->backup = vmw_dmabuf_reference(new_backup);
1181 list_add_tail(&res->mob_head, &new_backup->res_list); 1180 lockdep_assert_held(&new_backup->base.resv->lock.base);
1181 list_add_tail(&res->mob_head, &new_backup->res_list);
1182 } else {
1183 res->backup = NULL;
1184 }
1182 } 1185 }
1183 if (new_backup) 1186 if (switch_backup)
1184 res->backup_offset = new_backup_offset; 1187 res->backup_offset = new_backup_offset;
1185 1188
1186 if (!res->func->may_evict || res->id == -1) 1189 if (!res->func->may_evict || res->id == -1 || res->pin_count)
1187 return; 1190 return;
1188 1191
1189 write_lock(&dev_priv->resource_lock); 1192 write_lock(&dev_priv->resource_lock);
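The reworked unreserve makes three distinct call patterns possible, selected by the new switch_backup flag. In sketch form (the first variant is exactly what the pin/unpin code later in this patch uses):

/* Backup binding untouched; just put the resource back on the LRU. */
vmw_resource_unreserve(res, false, NULL, 0UL);

/* Switch to a new backup buffer at the given offset. */
vmw_resource_unreserve(res, true, new_backup, new_backup_offset);

/* Detach the current backup entirely. */
vmw_resource_unreserve(res, true, NULL, 0UL);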
@@ -1259,7 +1262,8 @@ out_no_reserve:
1259 * the buffer may not be bound to the resource at this point. 1262 * the buffer may not be bound to the resource at this point.
1260 * 1263 *
1261 */ 1264 */
1262int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) 1265int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
1266 bool no_backup)
1263{ 1267{
1264 struct vmw_private *dev_priv = res->dev_priv; 1268 struct vmw_private *dev_priv = res->dev_priv;
1265 int ret; 1269 int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1270 1274
1271 if (res->func->needs_backup && res->backup == NULL && 1275 if (res->func->needs_backup && res->backup == NULL &&
1272 !no_backup) { 1276 !no_backup) {
1273 ret = vmw_resource_buf_alloc(res, true); 1277 ret = vmw_resource_buf_alloc(res, interruptible);
1274 if (unlikely(ret != 0)) 1278 if (unlikely(ret != 0)) {
1279 DRM_ERROR("Failed to allocate a backup buffer "
 1280 "of size %lu bytes.\n",
1281 (unsigned long) res->backup_size);
1275 return ret; 1282 return ret;
1283 }
1276 } 1284 }
1277 1285
1278 return 0; 1286 return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1305 * @res: The resource to evict. 1313 * @res: The resource to evict.
1306 * @interruptible: Whether to wait interruptible. 1314 * @interruptible: Whether to wait interruptible.
1307 */ 1315 */
1308int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) 1316static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1309{ 1317{
1310 struct ttm_validate_buffer val_buf; 1318 struct ttm_validate_buffer val_buf;
1311 const struct vmw_res_func *func = res->func; 1319 const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1356 struct ttm_validate_buffer val_buf; 1364 struct ttm_validate_buffer val_buf;
1357 unsigned err_count = 0; 1365 unsigned err_count = 0;
1358 1366
1359 if (likely(!res->func->may_evict)) 1367 if (!res->func->create)
1360 return 0; 1368 return 0;
1361 1369
1362 val_buf.bo = NULL; 1370 val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1443/** 1451/**
1444 * vmw_resource_move_notify - TTM move_notify_callback 1452 * vmw_resource_move_notify - TTM move_notify_callback
1445 * 1453 *
1446 * @bo: The TTM buffer object about to move. 1454 * @bo: The TTM buffer object about to move.
1447 * @mem: The truct ttm_mem_reg indicating to what memory 1455 * @mem: The struct ttm_mem_reg indicating to what memory
1448 * region the move is taking place. 1456 * region the move is taking place.
1449 * 1457 *
1450 * Evicts the Guest Backed hardware resource if the backup 1458 * Evicts the Guest Backed hardware resource if the backup
1451 * buffer is being moved out of MOB memory. 1459 * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1495 } 1503 }
1496} 1504}
1497 1505
1506
1507
1508/**
1509 * vmw_query_readback_all - Read back cached query states
1510 *
1511 * @dx_query_mob: Buffer containing the DX query MOB
1512 *
1513 * Read back cached states from the device if they exist. This function
 1514 * assumes binding_mutex is held.
1515 */
1516int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1517{
1518 struct vmw_resource *dx_query_ctx;
1519 struct vmw_private *dev_priv;
1520 struct {
1521 SVGA3dCmdHeader header;
1522 SVGA3dCmdDXReadbackAllQuery body;
1523 } *cmd;
1524
1525
1526 /* No query bound, so do nothing */
1527 if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1528 return 0;
1529
1530 dx_query_ctx = dx_query_mob->dx_query_ctx;
1531 dev_priv = dx_query_ctx->dev_priv;
1532
1533 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1534 if (unlikely(cmd == NULL)) {
1535 DRM_ERROR("Failed reserving FIFO space for "
1536 "query MOB read back.\n");
1537 return -ENOMEM;
1538 }
1539
1540 cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1541 cmd->header.size = sizeof(cmd->body);
1542 cmd->body.cid = dx_query_ctx->id;
1543
1544 vmw_fifo_commit(dev_priv, sizeof(*cmd));
1545
1546 /* Triggers a rebind the next time affected context is bound */
1547 dx_query_mob->dx_query_ctx = NULL;
1548
1549 return 0;
1550}
1551
1552
1553
1554/**
1555 * vmw_query_move_notify - Read back cached query states
1556 *
1557 * @bo: The TTM buffer object about to move.
1558 * @mem: The memory region @bo is moving to.
1559 *
1560 * Called before the query MOB is swapped out to read back cached query
1561 * states from the device.
1562 */
1563void vmw_query_move_notify(struct ttm_buffer_object *bo,
1564 struct ttm_mem_reg *mem)
1565{
1566 struct vmw_dma_buffer *dx_query_mob;
1567 struct ttm_bo_device *bdev = bo->bdev;
1568 struct vmw_private *dev_priv;
1569
1570
1571 dev_priv = container_of(bdev, struct vmw_private, bdev);
1572
1573 mutex_lock(&dev_priv->binding_mutex);
1574
1575 dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1576 if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1577 mutex_unlock(&dev_priv->binding_mutex);
1578 return;
1579 }
1580
1581 /* If BO is being moved from MOB to system memory */
1582 if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1583 struct vmw_fence_obj *fence;
1584
1585 (void) vmw_query_readback_all(dx_query_mob);
1586 mutex_unlock(&dev_priv->binding_mutex);
1587
1588 /* Create a fence and attach the BO to it */
1589 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1590 vmw_fence_single_bo(bo, fence);
1591
1592 if (fence != NULL)
1593 vmw_fence_obj_unreference(&fence);
1594
1595 (void) ttm_bo_wait(bo, false, false, false);
1596 } else
1597 mutex_unlock(&dev_priv->binding_mutex);
1598
1599}
1600
1498/** 1601/**
1499 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. 1602 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1500 * 1603 *
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
1573 1676
1574 mutex_unlock(&dev_priv->cmdbuf_mutex); 1677 mutex_unlock(&dev_priv->cmdbuf_mutex);
1575} 1678}
1679
1680/**
1681 * vmw_resource_pin - Add a pin reference on a resource
1682 *
1683 * @res: The resource to add a pin reference on
1684 *
1685 * This function adds a pin reference, and if needed validates the resource.
1686 * Having a pin reference means that the resource can never be evicted, and
1687 * its id will never change as long as there is a pin reference.
1688 * This function returns 0 on success and a negative error code on failure.
1689 */
1690int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1691{
1692 struct vmw_private *dev_priv = res->dev_priv;
1693 int ret;
1694
1695 ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1696 mutex_lock(&dev_priv->cmdbuf_mutex);
1697 ret = vmw_resource_reserve(res, interruptible, false);
1698 if (ret)
1699 goto out_no_reserve;
1700
1701 if (res->pin_count == 0) {
1702 struct vmw_dma_buffer *vbo = NULL;
1703
1704 if (res->backup) {
1705 vbo = res->backup;
1706
1707 ttm_bo_reserve(&vbo->base, interruptible, false, false,
1708 NULL);
1709 if (!vbo->pin_count) {
1710 ret = ttm_bo_validate
1711 (&vbo->base,
1712 res->func->backup_placement,
1713 interruptible, false);
1714 if (ret) {
1715 ttm_bo_unreserve(&vbo->base);
1716 goto out_no_validate;
1717 }
1718 }
1719
1720 /* Do we really need to pin the MOB as well? */
1721 vmw_bo_pin_reserved(vbo, true);
1722 }
1723 ret = vmw_resource_validate(res);
1724 if (vbo)
1725 ttm_bo_unreserve(&vbo->base);
1726 if (ret)
1727 goto out_no_validate;
1728 }
1729 res->pin_count++;
1730
1731out_no_validate:
1732 vmw_resource_unreserve(res, false, NULL, 0UL);
1733out_no_reserve:
1734 mutex_unlock(&dev_priv->cmdbuf_mutex);
1735 ttm_write_unlock(&dev_priv->reservation_sem);
1736
1737 return ret;
1738}
1739
1740/**
1741 * vmw_resource_unpin - Remove a pin reference from a resource
1742 *
1743 * @res: The resource to remove a pin reference from
1744 *
1745 * Having a pin reference means that the resource can never be evicted, and
1746 * its id will never change as long as there is a pin reference.
1747 */
1748void vmw_resource_unpin(struct vmw_resource *res)
1749{
1750 struct vmw_private *dev_priv = res->dev_priv;
1751 int ret;
1752
1753 ttm_read_lock(&dev_priv->reservation_sem, false);
1754 mutex_lock(&dev_priv->cmdbuf_mutex);
1755
1756 ret = vmw_resource_reserve(res, false, true);
1757 WARN_ON(ret);
1758
1759 WARN_ON(res->pin_count == 0);
1760 if (--res->pin_count == 0 && res->backup) {
1761 struct vmw_dma_buffer *vbo = res->backup;
1762
1763 ttm_bo_reserve(&vbo->base, false, false, false, NULL);
1764 vmw_bo_pin_reserved(vbo, false);
1765 ttm_bo_unreserve(&vbo->base);
1766 }
1767
1768 vmw_resource_unreserve(res, false, NULL, 0UL);
1769
1770 mutex_unlock(&dev_priv->cmdbuf_mutex);
1771 ttm_read_unlock(&dev_priv->reservation_sem);
1772}
1773
1774/**
1775 * vmw_res_type - Return the resource type
1776 *
1777 * @res: Pointer to the resource
1778 */
1779enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1780{
1781 return res->func->res_type;
1782}
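The pin interface added at the end of this file is meant for kernel-owned objects such as the fbdev framebuffer elsewhere in this series: pin once at setup so the resource id and backing MOB stay stable, unpin at teardown. A minimal, hypothetical usage sketch:

static int my_setup(struct vmw_resource *res)
{
        int ret;

        ret = vmw_resource_pin(res, true);      /* interruptible */
        if (ret)
                return ret;

        /* res->id and the backup placement are now stable until unpin. */
        return 0;
}

static void my_teardown(struct vmw_resource *res)
{
        vmw_resource_unpin(res);
}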
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed2854c..5994ef6265e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
30 30
31#include "vmwgfx_drv.h" 31#include "vmwgfx_drv.h"
32 32
33enum vmw_cmdbuf_res_state {
34 VMW_CMDBUF_RES_COMMITTED,
35 VMW_CMDBUF_RES_ADD,
36 VMW_CMDBUF_RES_DEL
37};
38
33/** 39/**
34 * struct vmw_user_resource_conv - Identify a derived user-exported resource 40 * struct vmw_user_resource_conv - Identify a derived user-exported resource
35 * type and provide a function to convert its ttm_base_object pointer to 41 * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
55 * @bind: Bind a hardware resource to persistent buffer storage. 61 * @bind: Bind a hardware resource to persistent buffer storage.
56 * @unbind: Unbind a hardware resource from persistent 62 * @unbind: Unbind a hardware resource from persistent
57 * buffer storage. 63 * buffer storage.
64 * @commit_notify: If the resource is a command buffer managed resource,
65 * callback to notify that a define or remove command
66 * has been committed to the device.
58 */ 67 */
59
60struct vmw_res_func { 68struct vmw_res_func {
61 enum vmw_res_type res_type; 69 enum vmw_res_type res_type;
62 bool needs_backup; 70 bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
71 int (*unbind) (struct vmw_resource *res, 79 int (*unbind) (struct vmw_resource *res,
72 bool readback, 80 bool readback,
73 struct ttm_validate_buffer *val_buf); 81 struct ttm_validate_buffer *val_buf);
82 void (*commit_notify)(struct vmw_resource *res,
83 enum vmw_cmdbuf_res_state state);
74}; 84};
75 85
76int vmw_resource_alloc_id(struct vmw_resource *res); 86int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d04d9a..b96d1ab610c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
36#define vmw_connector_to_sou(x) \ 36#define vmw_connector_to_sou(x) \
37 container_of(x, struct vmw_screen_object_unit, base.connector) 37 container_of(x, struct vmw_screen_object_unit, base.connector)
38 38
39/**
40 * struct vmw_kms_sou_surface_dirty - Closure structure for
41 * blit surface to screen command.
42 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
43 * @left: Left side of bounding box.
44 * @right: Right side of bounding box.
45 * @top: Top side of bounding box.
46 * @bottom: Bottom side of bounding box.
47 * @dst_x: Difference between source clip rects and framebuffer coordinates.
48 * @dst_y: Difference between source clip rects and framebuffer coordinates.
49 * @sid: Surface id of surface to copy from.
50 */
51struct vmw_kms_sou_surface_dirty {
52 struct vmw_kms_dirty base;
53 s32 left, right, top, bottom;
54 s32 dst_x, dst_y;
55 u32 sid;
56};
57
58/*
59 * SVGA commands that are used by this code. Please see the device headers
60 * for explanation.
61 */
62struct vmw_kms_sou_readback_blit {
63 uint32 header;
64 SVGAFifoCmdBlitScreenToGMRFB body;
65};
66
67struct vmw_kms_sou_dmabuf_blit {
68 uint32 header;
69 SVGAFifoCmdBlitGMRFBToScreen body;
70};
71
72struct vmw_kms_sou_dirty_cmd {
73 SVGA3dCmdHeader header;
74 SVGA3dCmdBlitSurfaceToScreen body;
75};
76
77
78/*
79 * Other structs.
80 */
81
39struct vmw_screen_object_display { 82struct vmw_screen_object_display {
40 unsigned num_implicit; 83 unsigned num_implicit;
41 84
42 struct vmw_framebuffer *implicit_fb; 85 struct vmw_framebuffer *implicit_fb;
86 SVGAFifoCmdDefineGMRFB cur;
87 struct vmw_dma_buffer *pinned_gmrfb;
43}; 88};
44 89
45/** 90/**
@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {
57 102
58static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) 103static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
59{ 104{
60 vmw_display_unit_cleanup(&sou->base); 105 vmw_du_cleanup(&sou->base);
61 kfree(sou); 106 kfree(sou);
62} 107}
63 108
@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
72} 117}
73 118
74static void vmw_sou_del_active(struct vmw_private *vmw_priv, 119static void vmw_sou_del_active(struct vmw_private *vmw_priv,
75 struct vmw_screen_object_unit *sou) 120 struct vmw_screen_object_unit *sou)
76{ 121{
77 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 122 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
78 123
@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
84} 129}
85 130
86static void vmw_sou_add_active(struct vmw_private *vmw_priv, 131static void vmw_sou_add_active(struct vmw_private *vmw_priv,
87 struct vmw_screen_object_unit *sou, 132 struct vmw_screen_object_unit *sou,
88 struct vmw_framebuffer *vfb) 133 struct vmw_framebuffer *vfb)
89{ 134{
90 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 135 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
91 136
@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
202static void vmw_sou_backing_free(struct vmw_private *dev_priv, 247static void vmw_sou_backing_free(struct vmw_private *dev_priv,
203 struct vmw_screen_object_unit *sou) 248 struct vmw_screen_object_unit *sou)
204{ 249{
205 struct ttm_buffer_object *bo; 250 vmw_dmabuf_unreference(&sou->buffer);
206
207 if (unlikely(sou->buffer == NULL))
208 return;
209
210 bo = &sou->buffer->base;
211 ttm_bo_unref(&bo);
212 sou->buffer = NULL;
213 sou->buffer_size = 0; 251 sou->buffer_size = 0;
214} 252}
215 253
@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
274 dev_priv = vmw_priv(crtc->dev); 312 dev_priv = vmw_priv(crtc->dev);
275 313
276 if (set->num_connectors > 1) { 314 if (set->num_connectors > 1) {
277 DRM_ERROR("to many connectors\n"); 315 DRM_ERROR("Too many connectors\n");
278 return -EINVAL; 316 return -EINVAL;
279 } 317 }
280 318
281 if (set->num_connectors == 1 && 319 if (set->num_connectors == 1 &&
282 set->connectors[0] != &sou->base.connector) { 320 set->connectors[0] != &sou->base.connector) {
283 DRM_ERROR("connector doesn't match %p %p\n", 321 DRM_ERROR("Connector doesn't match %p %p\n",
284 set->connectors[0], &sou->base.connector); 322 set->connectors[0], &sou->base.connector);
285 return -EINVAL; 323 return -EINVAL;
286 } 324 }
@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
331 return -EINVAL; 369 return -EINVAL;
332 } 370 }
333 371
334 vmw_fb_off(dev_priv); 372 vmw_svga_enable(dev_priv);
335 373
336 if (mode->hdisplay != crtc->mode.hdisplay || 374 if (mode->hdisplay != crtc->mode.hdisplay ||
337 mode->vdisplay != crtc->mode.vdisplay) { 375 mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
390 return 0; 428 return 0;
391} 429}
392 430
431/**
 432 * Returns whether this unit can be page flipped.
433 * Must be called with the mode_config mutex held.
434 */
435static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
436 struct drm_crtc *crtc)
437{
438 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
439
440 if (!sou->base.is_implicit)
441 return true;
442
443 if (dev_priv->sou_priv->num_implicit != 1)
444 return false;
445
446 return true;
447}
448
449/**
450 * Update the implicit fb to the current fb of this crtc.
451 * Must be called with the mode_config mutex held.
452 */
453static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
454 struct drm_crtc *crtc)
455{
456 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
457
458 BUG_ON(!sou->base.is_implicit);
459
460 dev_priv->sou_priv->implicit_fb =
461 vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
462}
463
464static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
465 struct drm_framebuffer *fb,
466 struct drm_pending_vblank_event *event,
467 uint32_t flags)
468{
469 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
470 struct drm_framebuffer *old_fb = crtc->primary->fb;
471 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
472 struct vmw_fence_obj *fence = NULL;
473 struct drm_clip_rect clips;
474 int ret;
475
476 /* require ScreenObject support for page flipping */
477 if (!dev_priv->sou_priv)
478 return -ENOSYS;
479
480 if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
481 return -EINVAL;
482
483 crtc->primary->fb = fb;
484
485 /* do a full screen dirty update */
486 clips.x1 = clips.y1 = 0;
487 clips.x2 = fb->width;
488 clips.y2 = fb->height;
489
490 if (vfb->dmabuf)
491 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
492 &clips, 1, 1,
493 true, &fence);
494 else
495 ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
496 &clips, NULL, NULL,
497 0, 0, 1, 1, &fence);
498
499
500 if (ret != 0)
501 goto out_no_fence;
502 if (!fence) {
503 ret = -EINVAL;
504 goto out_no_fence;
505 }
506
507 if (event) {
508 struct drm_file *file_priv = event->base.file_priv;
509
510 ret = vmw_event_fence_action_queue(file_priv, fence,
511 &event->base,
512 &event->event.tv_sec,
513 &event->event.tv_usec,
514 true);
515 }
516
517 /*
518 * No need to hold on to this now. The only cleanup
519 * we need to do if we fail is unref the fence.
520 */
521 vmw_fence_obj_unreference(&fence);
522
523 if (vmw_crtc_to_du(crtc)->is_implicit)
524 vmw_sou_update_implicit_fb(dev_priv, crtc);
525
526 return ret;
527
528out_no_fence:
529 crtc->primary->fb = old_fb;
530 return ret;
531}
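
As a point of reference for the .page_flip hook above, this is roughly how a user-space client drives it through libdrm. The sketch below is illustrative only and not part of the patch; crtc_id and fb_id are assumed to come from an earlier modeset, and the event loop is reduced to a single blocking wait.

#include <stdio.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/* Fires once the flip completes; the timestamp is filled in via the
	 * vmw_event_fence_action_queue() path in the hook above. */
	printf("flip completed at %u.%06u\n", tv_sec, tv_usec);
}

static int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT, NULL);

	if (ret)
		return ret;
	/* Normally called after poll(2) reports the fd readable. */
	return drmHandleEvent(fd, &evctx);
}
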
532
393static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
394 .save = vmw_du_crtc_save, 534 .save = vmw_du_crtc_save,
395 .restore = vmw_du_crtc_restore, 535 .restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
398 .gamma_set = vmw_du_crtc_gamma_set, 538 .gamma_set = vmw_du_crtc_gamma_set,
399 .destroy = vmw_sou_crtc_destroy, 539 .destroy = vmw_sou_crtc_destroy,
400 .set_config = vmw_sou_crtc_set_config, 540 .set_config = vmw_sou_crtc_set_config,
401 .page_flip = vmw_du_page_flip, 541 .page_flip = vmw_sou_crtc_page_flip,
402}; 542};
403 543
404/* 544/*
@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
423 vmw_sou_destroy(vmw_connector_to_sou(connector)); 563 vmw_sou_destroy(vmw_connector_to_sou(connector));
424} 564}
425 565
426static struct drm_connector_funcs vmw_legacy_connector_funcs = { 566static struct drm_connector_funcs vmw_sou_connector_funcs = {
427 .dpms = vmw_du_connector_dpms, 567 .dpms = vmw_du_connector_dpms,
428 .save = vmw_du_connector_save, 568 .save = vmw_du_connector_save,
429 .restore = vmw_du_connector_restore, 569 .restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
458 sou->base.pref_mode = NULL; 598 sou->base.pref_mode = NULL;
459 sou->base.is_implicit = true; 599 sou->base.is_implicit = true;
460 600
461 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 601 drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
462 DRM_MODE_CONNECTOR_VIRTUAL); 602 DRM_MODE_CONNECTOR_VIRTUAL);
463 connector->status = vmw_du_connector_detect(connector, true); 603 connector->status = vmw_du_connector_detect(connector, true);
464 604
@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
481 return 0; 621 return 0;
482} 622}
483 623
484int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) 624int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
485{ 625{
486 struct drm_device *dev = dev_priv->dev; 626 struct drm_device *dev = dev_priv->dev;
487 int i, ret; 627 int i, ret;
@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
516 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) 656 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
517 vmw_sou_init(dev_priv, i); 657 vmw_sou_init(dev_priv, i);
518 658
519 DRM_INFO("Screen objects system initialized\n"); 659 dev_priv->active_display_unit = vmw_du_screen_object;
660
661 DRM_INFO("Screen Objects Display Unit initialized\n");
520 662
521 return 0; 663 return 0;
522 664
@@ -529,7 +671,7 @@ err_no_mem:
529 return ret; 671 return ret;
530} 672}
531 673
532int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) 674int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
533{ 675{
534 struct drm_device *dev = dev_priv->dev; 676 struct drm_device *dev = dev_priv->dev;
535 677
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
543 return 0; 685 return 0;
544} 686}
545 687
688static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
689 struct vmw_framebuffer *framebuffer)
690{
691 struct vmw_dma_buffer *buf =
692 container_of(framebuffer, struct vmw_framebuffer_dmabuf,
693 base)->buffer;
694 int depth = framebuffer->base.depth;
695 struct {
696 uint32_t header;
697 SVGAFifoCmdDefineGMRFB body;
698 } *cmd;
699
700 /* Emulate RGBA support: contrary to svga_reg.h, this is not
701 * supported by hosts. This is only a problem if we later read
702 * the value back and expect it to match what we uploaded.
703 */
704 if (depth == 32)
705 depth = 24;
706
707 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
708 if (!cmd) {
709 DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
710 return -ENOMEM;
711 }
712
713 cmd->header = SVGA_CMD_DEFINE_GMRFB;
714 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
715 cmd->body.format.colorDepth = depth;
716 cmd->body.format.reserved = 0;
717 cmd->body.bytesPerLine = framebuffer->base.pitches[0];
718 /* Buffer is reserved in vram or GMR */
719 vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
720 vmw_fifo_commit(dev_priv, sizeof(*cmd));
721
722 return 0;
723}
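
The depth fixup above is easy to misread, so here is the mapping in isolation: a 32 bpp framebuffer is reported to the device with a color depth of 24, since hosts do not accept depth 32 for the GMRFB. A minimal sketch; the helper name is illustrative and not part of the driver.

#include <stdint.h>

/* Mirrors the fixup in do_dmabuf_define_gmrfb(): the host ignores the
 * alpha channel, so depth 32 is downgraded to 24 while bitsPerPixel is
 * sent unmodified. */
static uint32_t gmrfb_color_depth(uint32_t depth)
{
	return depth == 32 ? 24 : depth;
}
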
724
546/** 725/**
547 * Returns if this unit can be page flipped. 726 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
548 * Must be called with the mode_config mutex held. 727 * blit surface to screen command.
728 *
729 * @dirty: The closure structure.
730 *
731 * Fills in the missing fields in the command, and translates the cliprects
732 * to match the destination bounding box encoded.
549 */ 733 */
550bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, 734static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
551 struct drm_crtc *crtc)
552{ 735{
553 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); 736 struct vmw_kms_sou_surface_dirty *sdirty =
737 container_of(dirty, typeof(*sdirty), base);
738 struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
739 s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
740 s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
741 size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
742 SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
743 int i;
744
745 cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
746 cmd->header.size = sizeof(cmd->body) + region_size;
747
748 /*
749 * Use the destination bounding box to specify destination - and
750 * source bounding regions.
751 */
752 cmd->body.destRect.left = sdirty->left;
753 cmd->body.destRect.right = sdirty->right;
754 cmd->body.destRect.top = sdirty->top;
755 cmd->body.destRect.bottom = sdirty->bottom;
756
757 cmd->body.srcRect.left = sdirty->left + trans_x;
758 cmd->body.srcRect.right = sdirty->right + trans_x;
759 cmd->body.srcRect.top = sdirty->top + trans_y;
760 cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
761
762 cmd->body.srcImage.sid = sdirty->sid;
763 cmd->body.destScreenId = dirty->unit->unit;
764
765 /* Blits are relative to the destination rect. Translate. */
766 for (i = 0; i < dirty->num_hits; ++i, ++blit) {
767 blit->left -= sdirty->left;
768 blit->right -= sdirty->left;
769 blit->top -= sdirty->top;
770 blit->bottom -= sdirty->top;
771 }
554 772
555 if (!sou->base.is_implicit) 773 vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
556 return true;
557 774
558 if (dev_priv->sou_priv->num_implicit != 1) 775 sdirty->left = sdirty->top = S32_MAX;
559 return false; 776 sdirty->right = sdirty->bottom = S32_MIN;
777}
560 778
561 return true; 779/**
780 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
781 *
782 * @dirty: The closure structure
783 *
784 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
785 * BLIT_SURFACE_TO_SCREEN command.
786 */
787static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
788{
789 struct vmw_kms_sou_surface_dirty *sdirty =
790 container_of(dirty, typeof(*sdirty), base);
791 struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
792 SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
793
794 /* Destination rect. */
795 blit += dirty->num_hits;
796 blit->left = dirty->unit_x1;
797 blit->top = dirty->unit_y1;
798 blit->right = dirty->unit_x2;
799 blit->bottom = dirty->unit_y2;
800
801 /* Destination bounding box */
802 sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
803 sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
804 sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
805 sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
806
807 dirty->num_hits++;
562} 808}
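
The clip/commit pair above follows a simple pattern: each clip callback appends a rect and grows a running bounding box, and the commit callback then rebases every rect so it is relative to that box before encoding the command. A self-contained sketch of the same arithmetic, with illustrative types:

#include <limits.h>

struct rect { int left, top, right, bottom; };

/* Grow the bounding box bb to cover r; bb starts at INT_MAX/INT_MIN,
 * matching the S32_MAX/S32_MIN reset in the fifo_commit callback. */
static void bbox_add(struct rect *bb, const struct rect *r)
{
	if (r->left < bb->left)
		bb->left = r->left;
	if (r->top < bb->top)
		bb->top = r->top;
	if (r->right > bb->right)
		bb->right = r->right;
	if (r->bottom > bb->bottom)
		bb->bottom = r->bottom;
}

/* Rebase each rect to the bounding-box origin, as done for the
 * SVGASignedRect array trailing the blit command. */
static void rebase_rects(struct rect *rects, int n, const struct rect *bb)
{
	int i;

	for (i = 0; i < n; ++i) {
		rects[i].left -= bb->left;
		rects[i].right -= bb->left;
		rects[i].top -= bb->top;
		rects[i].bottom -= bb->top;
	}
}
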
563 809
564/** 810/**
565 * Update the implicit fb to the current fb of this crtc. 811 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
566 * Must be called with the mode_config mutex held. 812 *
813 * @dev_priv: Pointer to the device private structure.
814 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
815 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
816 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
817 * be NULL.
818 * @srf: Pointer to surface to blit from. If NULL, the surface attached
819 * to @framebuffer will be used.
820 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
821 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
822 * @num_clips: Number of clip rects in @clips.
823 * @inc: Increment to use when looping over @clips.
824 * @out_fence: If non-NULL, will return a ref-counted pointer to a
825 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
826 * case the device has already synchronized.
827 *
828 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
829 * interrupted.
567 */ 830 */
568void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, 831int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
569 struct drm_crtc *crtc) 832 struct vmw_framebuffer *framebuffer,
833 struct drm_clip_rect *clips,
834 struct drm_vmw_rect *vclips,
835 struct vmw_resource *srf,
836 s32 dest_x,
837 s32 dest_y,
838 unsigned num_clips, int inc,
839 struct vmw_fence_obj **out_fence)
570{ 840{
571 struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); 841 struct vmw_framebuffer_surface *vfbs =
842 container_of(framebuffer, typeof(*vfbs), base);
843 struct vmw_kms_sou_surface_dirty sdirty;
844 int ret;
572 845
573 BUG_ON(!sou->base.is_implicit); 846 if (!srf)
847 srf = &vfbs->surface->res;
574 848
575 dev_priv->sou_priv->implicit_fb = 849 ret = vmw_kms_helper_resource_prepare(srf, true);
576 vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb); 850 if (ret)
851 return ret;
852
853 sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
854 sdirty.base.clip = vmw_sou_surface_clip;
855 sdirty.base.dev_priv = dev_priv;
856 sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
857 sizeof(SVGASignedRect) * num_clips;
858
859 sdirty.sid = srf->id;
860 sdirty.left = sdirty.top = S32_MAX;
861 sdirty.right = sdirty.bottom = S32_MIN;
862 sdirty.dst_x = dest_x;
863 sdirty.dst_y = dest_y;
864
865 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
866 dest_x, dest_y, num_clips, inc,
867 &sdirty.base);
868 vmw_kms_helper_resource_finish(srf, out_fence);
869
870 return ret;
871}
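
For readers unfamiliar with the helper, vmw_kms_helper_dirty() is handed a closure object: it reserves fifo_reserve_size of command space, invokes the clip callback once per rect that intersects a display unit, and finally calls fifo_commit. A reduced model of that control flow, under assumed names rather than the real helper's internals:

/* Minimal model of the vmw_kms_dirty closure pattern; not driver code. */
struct dirty_closure {
	void (*clip)(struct dirty_closure *d, int x1, int y1, int x2, int y2);
	void (*fifo_commit)(struct dirty_closure *d);
	unsigned int num_hits;
};

struct clip_rect { int x1, y1, x2, y2; };

static void run_dirty(struct dirty_closure *d,
		      const struct clip_rect *clips, int num_clips, int inc)
{
	int i;

	d->num_hits = 0;
	for (i = 0; i < num_clips; i += inc)
		d->clip(d, clips[i].x1, clips[i].y1,
			clips[i].x2, clips[i].y2);
	if (d->num_hits)
		d->fifo_commit(d);
}
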
872
873/**
874 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dmabuf dirty blits.
875 *
876 * @dirty: The closure structure.
877 *
878 * Commits a previously built command buffer of dmabuf dirty blits.
879 */
880static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
881{
882 vmw_fifo_commit(dirty->dev_priv,
883 sizeof(struct vmw_kms_sou_dmabuf_blit) *
884 dirty->num_hits);
885}
886
887/**
888 * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
889 *
890 * @dirty: The closure structure
891 *
892 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
893 */
894static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
895{
896 struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
897
898 blit += dirty->num_hits;
899 blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
900 blit->body.destScreenId = dirty->unit->unit;
901 blit->body.srcOrigin.x = dirty->fb_x;
902 blit->body.srcOrigin.y = dirty->fb_y;
903 blit->body.destRect.left = dirty->unit_x1;
904 blit->body.destRect.top = dirty->unit_y1;
905 blit->body.destRect.right = dirty->unit_x2;
906 blit->body.destRect.bottom = dirty->unit_y2;
907 dirty->num_hits++;
908}
909
910/**
911 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
912 *
913 * @dev_priv: Pointer to the device private structure.
914 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
915 * @clips: Array of clip rects.
916 * @num_clips: Number of clip rects in @clips.
917 * @increment: Increment to use when looping over @clips.
918 * @interruptible: Whether to perform waits interruptible if possible.
919 * @out_fence: If non-NULL, will return a ref-counted pointer to a
920 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
921 * case the device has already synchronized.
922 *
923 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
924 * interrupted.
925 */
926int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
927 struct vmw_framebuffer *framebuffer,
928 struct drm_clip_rect *clips,
929 unsigned num_clips, int increment,
930 bool interruptible,
931 struct vmw_fence_obj **out_fence)
932{
933 struct vmw_dma_buffer *buf =
934 container_of(framebuffer, struct vmw_framebuffer_dmabuf,
935 base)->buffer;
936 struct vmw_kms_dirty dirty;
937 int ret;
938
939 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
940 false);
941 if (ret)
942 return ret;
943
944 ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
945 if (unlikely(ret != 0))
946 goto out_revert;
947
948 dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
949 dirty.clip = vmw_sou_dmabuf_clip;
950 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
951 num_clips;
952 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
953 0, 0, num_clips, increment, &dirty);
954 vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
955
956 return ret;
957
958out_revert:
959 vmw_kms_helper_buffer_revert(buf);
960
961 return ret;
962}
963
964
965/**
966 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
967 *
968 * @dirty: The closure structure.
969 *
970 * Commits a previously built command buffer of readback clips.
971 */
972static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
973{
974 vmw_fifo_commit(dirty->dev_priv,
975 sizeof(struct vmw_kms_sou_readback_blit) *
976 dirty->num_hits);
977}
978
979/**
980 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
981 *
982 * @dirty: The closure structure
983 *
984 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
985 */
986static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
987{
988 struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
989
990 blit += dirty->num_hits;
991 blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
992 blit->body.srcScreenId = dirty->unit->unit;
993 blit->body.destOrigin.x = dirty->fb_x;
994 blit->body.destOrigin.y = dirty->fb_y;
995 blit->body.srcRect.left = dirty->unit_x1;
996 blit->body.srcRect.top = dirty->unit_y1;
997 blit->body.srcRect.right = dirty->unit_x2;
998 blit->body.srcRect.bottom = dirty->unit_y2;
999 dirty->num_hits++;
1000}
1001
1002/**
1003 * vmw_kms_sou_readback - Perform a readback from the screen object system to
1004 * a dma-buffer backed framebuffer.
1005 *
1006 * @dev_priv: Pointer to the device private structure.
1007 * @file_priv: Pointer to a struct drm_file identifying the caller.
1008 * Must be set to NULL if @user_fence_rep is NULL.
1009 * @vfb: Pointer to the dma-buffer backed framebuffer.
1010 * @user_fence_rep: User-space provided structure for fence information.
1011 * Must be set to non-NULL if @file_priv is non-NULL.
1012 * @vclips: Array of clip rects.
1013 * @num_clips: Number of clip rects in @vclips.
1014 *
1015 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1016 * interrupted.
1017 */
1018int vmw_kms_sou_readback(struct vmw_private *dev_priv,
1019 struct drm_file *file_priv,
1020 struct vmw_framebuffer *vfb,
1021 struct drm_vmw_fence_rep __user *user_fence_rep,
1022 struct drm_vmw_rect *vclips,
1023 uint32_t num_clips)
1024{
1025 struct vmw_dma_buffer *buf =
1026 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
1027 struct vmw_kms_dirty dirty;
1028 int ret;
1029
1030 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
1031 if (ret)
1032 return ret;
1033
1034 ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
1035 if (unlikely(ret != 0))
1036 goto out_revert;
1037
1038 dirty.fifo_commit = vmw_sou_readback_fifo_commit;
1039 dirty.clip = vmw_sou_readback_clip;
1040 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
1041 num_clips;
1042 ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
1043 0, 0, num_clips, 1, &dirty);
1044 vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
1045 user_fence_rep);
1046
1047 return ret;
1048
1049out_revert:
1050 vmw_kms_helper_buffer_revert(buf);
1051
1052 return ret;
577} 1053}
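
Note the symmetry between the dirty and readback paths above: both define the same GMRFB first, and only the direction of the blit differs (SVGA_CMD_BLIT_GMRFB_TO_SCREEN versus SVGA_CMD_BLIT_SCREEN_TO_GMRFB). In both encodings the framebuffer side is an origin and the screen side is a rect, which the following illustrative model makes explicit:

#include <stdint.h>

struct svga_point { int32_t x, y; };
struct svga_box { int32_t left, top, right, bottom; };

/* Illustrative model only; the real command layouts live in svga_reg.h. */
struct gmrfb_blit {
	int to_screen;                /* 1: dirty update, 0: readback */
	struct svga_point fb_origin;  /* srcOrigin or destOrigin */
	struct svga_box screen_rect;  /* destRect or srcRect */
	uint32_t screen_id;
};

static void encode_gmrfb_blit(struct gmrfb_blit *b, int to_screen,
			      uint32_t screen_id, int fb_x, int fb_y,
			      int x1, int y1, int x2, int y2)
{
	b->to_screen = to_screen;
	b->screen_id = screen_id;
	b->fb_origin.x = fb_x;
	b->fb_origin.y = fb_y;
	b->screen_rect.left = x1;
	b->screen_rect.top = y1;
	b->screen_rect.right = x2;
	b->screen_rect.bottom = y2;
}
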
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a43aa6..bba1ee395478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "vmwgfx_binding.h"
30#include "ttm/ttm_placement.h" 31#include "ttm/ttm_placement.h"
31 32
32struct vmw_shader { 33struct vmw_shader {
33 struct vmw_resource res; 34 struct vmw_resource res;
34 SVGA3dShaderType type; 35 SVGA3dShaderType type;
35 uint32_t size; 36 uint32_t size;
37 uint8_t num_input_sig;
38 uint8_t num_output_sig;
36}; 39};
37 40
38struct vmw_user_shader { 41struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
40 struct vmw_shader shader; 43 struct vmw_shader shader;
41}; 44};
42 45
46struct vmw_dx_shader {
47 struct vmw_resource res;
48 struct vmw_resource *ctx;
49 struct vmw_resource *cotable;
50 u32 id;
51 bool committed;
52 struct list_head cotable_head;
53};
54
43static uint64_t vmw_user_shader_size; 55static uint64_t vmw_user_shader_size;
44static uint64_t vmw_shader_size; 56static uint64_t vmw_shader_size;
57static size_t vmw_shader_dx_size;
45 58
46static void vmw_user_shader_free(struct vmw_resource *res); 59static void vmw_user_shader_free(struct vmw_resource *res);
47static struct vmw_resource * 60static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
55 struct ttm_validate_buffer *val_buf); 68 struct ttm_validate_buffer *val_buf);
56static int vmw_gb_shader_destroy(struct vmw_resource *res); 69static int vmw_gb_shader_destroy(struct vmw_resource *res);
57 70
71static int vmw_dx_shader_create(struct vmw_resource *res);
72static int vmw_dx_shader_bind(struct vmw_resource *res,
73 struct ttm_validate_buffer *val_buf);
74static int vmw_dx_shader_unbind(struct vmw_resource *res,
75 bool readback,
76 struct ttm_validate_buffer *val_buf);
77static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
78 enum vmw_cmdbuf_res_state state);
79static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
80static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
81static uint64_t vmw_user_shader_size;
82
58static const struct vmw_user_resource_conv user_shader_conv = { 83static const struct vmw_user_resource_conv user_shader_conv = {
59 .object_type = VMW_RES_SHADER, 84 .object_type = VMW_RES_SHADER,
60 .base_obj_to_res = vmw_user_shader_base_to_res, 85 .base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
77 .unbind = vmw_gb_shader_unbind 102 .unbind = vmw_gb_shader_unbind
78}; 103};
79 104
105static const struct vmw_res_func vmw_dx_shader_func = {
106 .res_type = vmw_res_shader,
107 .needs_backup = true,
108 .may_evict = false,
109 .type_name = "dx shaders",
110 .backup_placement = &vmw_mob_placement,
111 .create = vmw_dx_shader_create,
112 /*
113 * The destroy callback is only called with a committed resource on
114 * context destroy, in which case we destroy the cotable anyway,
115 * so there's no need to destroy DX shaders separately.
116 */
117 .destroy = NULL,
118 .bind = vmw_dx_shader_bind,
119 .unbind = vmw_dx_shader_unbind,
120 .commit_notify = vmw_dx_shader_commit_notify,
121};
122
80/** 123/**
81 * Shader management: 124 * Shader management:
82 */ 125 */
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
87 return container_of(res, struct vmw_shader, res); 130 return container_of(res, struct vmw_shader, res);
88} 131}
89 132
133/**
134 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
135 * struct vmw_dx_shader
136 *
137 * @res: Pointer to the struct vmw_resource.
138 */
139static inline struct vmw_dx_shader *
140vmw_res_to_dx_shader(struct vmw_resource *res)
141{
142 return container_of(res, struct vmw_dx_shader, res);
143}
144
90static void vmw_hw_shader_destroy(struct vmw_resource *res) 145static void vmw_hw_shader_destroy(struct vmw_resource *res)
91{ 146{
92 (void) vmw_gb_shader_destroy(res); 147 if (likely(res->func->destroy))
148 (void) res->func->destroy(res);
149 else
150 res->id = -1;
93} 151}
94 152
153
95static int vmw_gb_shader_init(struct vmw_private *dev_priv, 154static int vmw_gb_shader_init(struct vmw_private *dev_priv,
96 struct vmw_resource *res, 155 struct vmw_resource *res,
97 uint32_t size, 156 uint32_t size,
98 uint64_t offset, 157 uint64_t offset,
99 SVGA3dShaderType type, 158 SVGA3dShaderType type,
159 uint8_t num_input_sig,
160 uint8_t num_output_sig,
100 struct vmw_dma_buffer *byte_code, 161 struct vmw_dma_buffer *byte_code,
101 void (*res_free) (struct vmw_resource *res)) 162 void (*res_free) (struct vmw_resource *res))
102{ 163{
103 struct vmw_shader *shader = vmw_res_to_shader(res); 164 struct vmw_shader *shader = vmw_res_to_shader(res);
104 int ret; 165 int ret;
105 166
106 ret = vmw_resource_init(dev_priv, res, true, 167 ret = vmw_resource_init(dev_priv, res, true, res_free,
107 res_free, &vmw_gb_shader_func); 168 &vmw_gb_shader_func);
108
109 169
110 if (unlikely(ret != 0)) { 170 if (unlikely(ret != 0)) {
111 if (res_free) 171 if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
122 } 182 }
123 shader->size = size; 183 shader->size = size;
124 shader->type = type; 184 shader->type = type;
185 shader->num_input_sig = num_input_sig;
186 shader->num_output_sig = num_output_sig;
125 187
126 vmw_resource_activate(res, vmw_hw_shader_destroy); 188 vmw_resource_activate(res, vmw_hw_shader_destroy);
127 return 0; 189 return 0;
128} 190}
129 191
192/*
193 * GB shader code:
194 */
195
130static int vmw_gb_shader_create(struct vmw_resource *res) 196static int vmw_gb_shader_create(struct vmw_resource *res)
131{ 197{
132 struct vmw_private *dev_priv = res->dev_priv; 198 struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
165 cmd->body.type = shader->type; 231 cmd->body.type = shader->type;
166 cmd->body.sizeInBytes = shader->size; 232 cmd->body.sizeInBytes = shader->size;
167 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 233 vmw_fifo_commit(dev_priv, sizeof(*cmd));
168 (void) vmw_3d_resource_inc(dev_priv, false); 234 vmw_fifo_resource_inc(dev_priv);
169 235
170 return 0; 236 return 0;
171 237
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
259 return 0; 325 return 0;
260 326
261 mutex_lock(&dev_priv->binding_mutex); 327 mutex_lock(&dev_priv->binding_mutex);
262 vmw_context_binding_res_list_scrub(&res->binding_head); 328 vmw_binding_res_list_scrub(&res->binding_head);
263 329
264 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 330 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
265 if (unlikely(cmd == NULL)) { 331 if (unlikely(cmd == NULL)) {
@@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
275 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 341 vmw_fifo_commit(dev_priv, sizeof(*cmd));
276 mutex_unlock(&dev_priv->binding_mutex); 342 mutex_unlock(&dev_priv->binding_mutex);
277 vmw_resource_release_id(res); 343 vmw_resource_release_id(res);
278 vmw_3d_resource_dec(dev_priv, false); 344 vmw_fifo_resource_dec(dev_priv);
345
346 return 0;
347}
348
349/*
350 * DX shader code:
351 */
352
353/**
354 * vmw_dx_shader_commit_notify - Notify that a shader operation has been
355 * committed to hardware from a user-supplied command stream.
356 *
357 * @res: Pointer to the shader resource.
358 * @state: Indicating whether a creation or removal has been committed.
359 *
360 */
361static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
362 enum vmw_cmdbuf_res_state state)
363{
364 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
365 struct vmw_private *dev_priv = res->dev_priv;
366
367 if (state == VMW_CMDBUF_RES_ADD) {
368 mutex_lock(&dev_priv->binding_mutex);
369 vmw_cotable_add_resource(shader->cotable,
370 &shader->cotable_head);
371 shader->committed = true;
372 res->id = shader->id;
373 mutex_unlock(&dev_priv->binding_mutex);
374 } else {
375 mutex_lock(&dev_priv->binding_mutex);
376 list_del_init(&shader->cotable_head);
377 shader->committed = false;
378 res->id = -1;
379 mutex_unlock(&dev_priv->binding_mutex);
380 }
381}
382
383/**
384 * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
385 *
386 * @res: The shader resource
387 *
388 * This function reverts a scrub operation.
389 */
390static int vmw_dx_shader_unscrub(struct vmw_resource *res)
391{
392 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
393 struct vmw_private *dev_priv = res->dev_priv;
394 struct {
395 SVGA3dCmdHeader header;
396 SVGA3dCmdDXBindShader body;
397 } *cmd;
398
399 if (!list_empty(&shader->cotable_head) || !shader->committed)
400 return 0;
401
402 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
403 shader->ctx->id);
404 if (unlikely(cmd == NULL)) {
405 DRM_ERROR("Failed reserving FIFO space for shader "
406 "scrubbing.\n");
407 return -ENOMEM;
408 }
409
410 cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
411 cmd->header.size = sizeof(cmd->body);
412 cmd->body.cid = shader->ctx->id;
413 cmd->body.shid = shader->id;
414 cmd->body.mobid = res->backup->base.mem.start;
415 cmd->body.offsetInBytes = res->backup_offset;
416 vmw_fifo_commit(dev_priv, sizeof(*cmd));
417
418 vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
419
420 return 0;
421}
422
423/**
424 * vmw_dx_shader_create - The DX shader create callback
425 *
426 * @res: The DX shader resource
427 *
428 * The create callback is called as part of resource validation and
429 * makes sure that we unscrub the shader if it's previously been scrubbed.
430 */
431static int vmw_dx_shader_create(struct vmw_resource *res)
432{
433 struct vmw_private *dev_priv = res->dev_priv;
434 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
435 int ret = 0;
436
437 WARN_ON_ONCE(!shader->committed);
438
439 if (!list_empty(&res->mob_head)) {
440 mutex_lock(&dev_priv->binding_mutex);
441 ret = vmw_dx_shader_unscrub(res);
442 mutex_unlock(&dev_priv->binding_mutex);
443 }
444
445 res->id = shader->id;
446 return ret;
447}
448
449/**
450 * vmw_dx_shader_bind - The DX shader bind callback
451 *
452 * @res: The DX shader resource
453 * @val_buf: Pointer to the validate buffer.
454 *
455 */
456static int vmw_dx_shader_bind(struct vmw_resource *res,
457 struct ttm_validate_buffer *val_buf)
458{
459 struct vmw_private *dev_priv = res->dev_priv;
460 struct ttm_buffer_object *bo = val_buf->bo;
461
462 BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
463 mutex_lock(&dev_priv->binding_mutex);
464 vmw_dx_shader_unscrub(res);
465 mutex_unlock(&dev_priv->binding_mutex);
466
467 return 0;
468}
469
470/**
471 * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
472 *
473 * @res: The shader resource
474 *
475 * This function unbinds a MOB from the DX shader without requiring the
476 * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
477 * However, once the driver eventually decides to unbind the MOB, it doesn't
478 * need to access the context.
479 */
480static int vmw_dx_shader_scrub(struct vmw_resource *res)
481{
482 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
483 struct vmw_private *dev_priv = res->dev_priv;
484 struct {
485 SVGA3dCmdHeader header;
486 SVGA3dCmdDXBindShader body;
487 } *cmd;
488
489 if (list_empty(&shader->cotable_head))
490 return 0;
491
492 WARN_ON_ONCE(!shader->committed);
493 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
494 if (unlikely(cmd == NULL)) {
495 DRM_ERROR("Failed reserving FIFO space for shader "
496 "scrubbing.\n");
497 return -ENOMEM;
498 }
499
500 cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
501 cmd->header.size = sizeof(cmd->body);
502 cmd->body.cid = shader->ctx->id;
503 cmd->body.shid = res->id;
504 cmd->body.mobid = SVGA3D_INVALID_ID;
505 cmd->body.offsetInBytes = 0;
506 vmw_fifo_commit(dev_priv, sizeof(*cmd));
507 res->id = -1;
508 list_del_init(&shader->cotable_head);
279 509
280 return 0; 510 return 0;
281} 511}
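
Taken together, scrub and unscrub form a small state machine keyed on the committed flag and cotable-list membership: scrubbing binds SVGA3D_INVALID_ID as the MOB and leaves the list, unscrubbing rebinds the real MOB id and rejoins it. A reduced model with illustrative names:

#include <stdbool.h>
#include <stdint.h>

#define INVALID_MOB 0xffffffffu	/* stand-in for SVGA3D_INVALID_ID */

struct dx_shader_state {
	bool committed;		/* created via the command stream */
	bool on_cotable;	/* linked on the cotable resource list */
	uint32_t mobid;		/* bound MOB, or INVALID_MOB */
};

/* Mirrors vmw_dx_shader_scrub(): detach the MOB, leave the list. */
static void scrub(struct dx_shader_state *s)
{
	if (!s->on_cotable)
		return;
	s->mobid = INVALID_MOB;
	s->on_cotable = false;
}

/* Mirrors vmw_dx_shader_unscrub(): reattach only when needed. */
static void unscrub(struct dx_shader_state *s, uint32_t real_mobid)
{
	if (s->on_cotable || !s->committed)
		return;
	s->mobid = real_mobid;
	s->on_cotable = true;
}
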
282 512
283/** 513/**
514 * vmw_dx_shader_unbind - The dx shader unbind callback.
515 *
516 * @res: The shader resource
517 * @readback: Whether this is a readback unbind. Currently unused.
518 * @val_buf: MOB buffer information.
519 */
520static int vmw_dx_shader_unbind(struct vmw_resource *res,
521 bool readback,
522 struct ttm_validate_buffer *val_buf)
523{
524 struct vmw_private *dev_priv = res->dev_priv;
525 struct vmw_fence_obj *fence;
526 int ret;
527
528 BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
529
530 mutex_lock(&dev_priv->binding_mutex);
531 ret = vmw_dx_shader_scrub(res);
532 mutex_unlock(&dev_priv->binding_mutex);
533
534 if (ret)
535 return ret;
536
537 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
538 &fence, NULL);
539 vmw_fence_single_bo(val_buf->bo, fence);
540
541 if (likely(fence != NULL))
542 vmw_fence_obj_unreference(&fence);
543
544 return 0;
545}
546
547/**
548 * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
549 * DX shaders.
550 *
551 * @dev_priv: Pointer to device private structure.
552 * @list: The list of cotable resources.
553 * @readback: Whether the call was part of a readback unbind.
554 *
555 * Scrubs all shader MOBs so that any subsequent shader unbind or shader
556 * destroy operation won't need to swap in the context.
557 */
558void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
559 struct list_head *list,
560 bool readback)
561{
562 struct vmw_dx_shader *entry, *next;
563
564 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
565
566 list_for_each_entry_safe(entry, next, list, cotable_head) {
567 WARN_ON(vmw_dx_shader_scrub(&entry->res));
568 if (!readback)
569 entry->committed = false;
570 }
571}
572
573/**
574 * vmw_dx_shader_res_free - The DX shader free callback
575 *
576 * @res: The shader resource
577 *
578 * Frees the DX shader resource and updates memory accounting.
579 */
580static void vmw_dx_shader_res_free(struct vmw_resource *res)
581{
582 struct vmw_private *dev_priv = res->dev_priv;
583 struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
584
585 vmw_resource_unreference(&shader->cotable);
586 kfree(shader);
587 ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
588}
589
590/**
591 * vmw_dx_shader_add - Add a shader resource as a command buffer managed
592 * resource.
593 *
594 * @man: The command buffer resource manager.
595 * @ctx: Pointer to the context resource.
596 * @user_key: The id used for this shader.
597 * @shader_type: The shader type.
598 * @list: The list of staged command buffer managed resources.
599 */
600int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
601 struct vmw_resource *ctx,
602 u32 user_key,
603 SVGA3dShaderType shader_type,
604 struct list_head *list)
605{
606 struct vmw_dx_shader *shader;
607 struct vmw_resource *res;
608 struct vmw_private *dev_priv = ctx->dev_priv;
609 int ret;
610
611 if (!vmw_shader_dx_size)
612 vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
613
614 if (!vmw_shader_id_ok(user_key, shader_type))
615 return -EINVAL;
616
617 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
618 false, true);
619 if (ret) {
620 if (ret != -ERESTARTSYS)
621 DRM_ERROR("Out of graphics memory for shader "
622 "creation.\n");
623 return ret;
624 }
625
626 shader = kmalloc(sizeof(*shader), GFP_KERNEL);
627 if (!shader) {
628 ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
629 return -ENOMEM;
630 }
631
632 res = &shader->res;
633 shader->ctx = ctx;
634 shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
635 shader->id = user_key;
636 shader->committed = false;
637 INIT_LIST_HEAD(&shader->cotable_head);
638 ret = vmw_resource_init(dev_priv, res, true,
639 vmw_dx_shader_res_free, &vmw_dx_shader_func);
640 if (ret)
641 goto out_resource_init;
642
643 /*
644 * The user_key name-space is not per shader type for DX shaders,
645 * so when hashing, use a single zero shader type.
646 */
647 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
648 vmw_shader_key(user_key, 0),
649 res, list);
650 if (ret)
651 goto out_resource_init;
652
653 res->id = shader->id;
654 vmw_resource_activate(res, vmw_hw_shader_destroy);
655
656out_resource_init:
657 vmw_resource_unreference(&res);
658
659 return ret;
660}
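
Note the tail of vmw_dx_shader_add(): res is unreferenced even on success, because vmw_cmdbuf_res_add() takes its own reference for the staged list, so the local reference obtained at init time is always dropped. A tiny refcount model of that hand-off, illustrative only:

#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

/* The registry takes its own reference, so the creator drops the
 * initial one on success and failure alike. */
static int create_and_register(struct obj **registry_slot)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return -1;		/* -ENOMEM in the kernel */
	o->refcount = 1;		/* creator's reference */
	*registry_slot = obj_get(o);	/* registry's reference */
	obj_put(o);			/* drop the creator's reference */
	return 0;
}
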
661
662
663
664/**
284 * User-space shader management: 665 * User-space shader management:
285 */ 666 */
286 667
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
341 size_t shader_size, 722 size_t shader_size,
342 size_t offset, 723 size_t offset,
343 SVGA3dShaderType shader_type, 724 SVGA3dShaderType shader_type,
725 uint8_t num_input_sig,
726 uint8_t num_output_sig,
344 struct ttm_object_file *tfile, 727 struct ttm_object_file *tfile,
345 u32 *handle) 728 u32 *handle)
346{ 729{
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
383 */ 766 */
384 767
385 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 768 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
386 offset, shader_type, buffer, 769 offset, shader_type, num_input_sig,
770 num_output_sig, buffer,
387 vmw_user_shader_free); 771 vmw_user_shader_free);
388 if (unlikely(ret != 0)) 772 if (unlikely(ret != 0))
389 goto out; 773 goto out;
@@ -407,11 +791,11 @@ out:
407} 791}
408 792
409 793
410struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, 794static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
411 struct vmw_dma_buffer *buffer, 795 struct vmw_dma_buffer *buffer,
412 size_t shader_size, 796 size_t shader_size,
413 size_t offset, 797 size_t offset,
414 SVGA3dShaderType shader_type) 798 SVGA3dShaderType shader_type)
415{ 799{
416 struct vmw_shader *shader; 800 struct vmw_shader *shader;
417 struct vmw_resource *res; 801 struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
449 * From here on, the destructor takes over resource freeing. 833 * From here on, the destructor takes over resource freeing.
450 */ 834 */
451 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 835 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
452 offset, shader_type, buffer, 836 offset, shader_type, 0, 0, buffer,
453 vmw_shader_free); 837 vmw_shader_free);
454 838
455out_err: 839out_err:
@@ -457,19 +841,20 @@ out_err:
457} 841}
458 842
459 843
460int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 844static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
461 struct drm_file *file_priv) 845 enum drm_vmw_shader_type shader_type_drm,
846 u32 buffer_handle, size_t size, size_t offset,
847 uint8_t num_input_sig, uint8_t num_output_sig,
848 uint32_t *shader_handle)
462{ 849{
463 struct vmw_private *dev_priv = vmw_priv(dev); 850 struct vmw_private *dev_priv = vmw_priv(dev);
464 struct drm_vmw_shader_create_arg *arg =
465 (struct drm_vmw_shader_create_arg *)data;
466 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 851 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
467 struct vmw_dma_buffer *buffer = NULL; 852 struct vmw_dma_buffer *buffer = NULL;
468 SVGA3dShaderType shader_type; 853 SVGA3dShaderType shader_type;
469 int ret; 854 int ret;
470 855
471 if (arg->buffer_handle != SVGA3D_INVALID_ID) { 856 if (buffer_handle != SVGA3D_INVALID_ID) {
472 ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, 857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
473 &buffer); 858 &buffer);
474 if (unlikely(ret != 0)) { 859 if (unlikely(ret != 0)) {
475 DRM_ERROR("Could not find buffer for shader " 860 DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
478 } 863 }
479 864
480 if ((u64)buffer->base.num_pages * PAGE_SIZE < 865 if ((u64)buffer->base.num_pages * PAGE_SIZE <
481 (u64)arg->size + (u64)arg->offset) { 866 (u64)size + (u64)offset) {
482 DRM_ERROR("Illegal buffer- or shader size.\n"); 867 DRM_ERROR("Illegal buffer- or shader size.\n");
483 ret = -EINVAL; 868 ret = -EINVAL;
484 goto out_bad_arg; 869 goto out_bad_arg;
485 } 870 }
486 } 871 }
487 872
488 switch (arg->shader_type) { 873 switch (shader_type_drm) {
489 case drm_vmw_shader_type_vs: 874 case drm_vmw_shader_type_vs:
490 shader_type = SVGA3D_SHADERTYPE_VS; 875 shader_type = SVGA3D_SHADERTYPE_VS;
491 break; 876 break;
492 case drm_vmw_shader_type_ps: 877 case drm_vmw_shader_type_ps:
493 shader_type = SVGA3D_SHADERTYPE_PS; 878 shader_type = SVGA3D_SHADERTYPE_PS;
494 break; 879 break;
495 case drm_vmw_shader_type_gs:
496 shader_type = SVGA3D_SHADERTYPE_GS;
497 break;
498 default: 880 default:
499 DRM_ERROR("Illegal shader type.\n"); 881 DRM_ERROR("Illegal shader type.\n");
500 ret = -EINVAL; 882 ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
505 if (unlikely(ret != 0)) 887 if (unlikely(ret != 0))
506 goto out_bad_arg; 888 goto out_bad_arg;
507 889
508 ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, 890 ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
509 shader_type, tfile, &arg->shader_handle); 891 shader_type, num_input_sig,
892 num_output_sig, tfile, shader_handle);
510 893
511 ttm_read_unlock(&dev_priv->reservation_sem); 894 ttm_read_unlock(&dev_priv->reservation_sem);
512out_bad_arg: 895out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
515} 898}
516 899
517/** 900/**
518 * vmw_compat_shader_id_ok - Check whether a compat shader user key and 901 * vmw_shader_id_ok - Check whether a compat shader user key and
519 * shader type are within valid bounds. 902 * shader type are within valid bounds.
520 * 903 *
521 * @user_key: User space id of the shader. 904 * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
523 * 906 *
524 * Returns true if valid false if not. 907 * Returns true if valid false if not.
525 */ 908 */
526static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) 909static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
527{ 910{
528 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; 911 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
529} 912}
530 913
531/** 914/**
532 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. 915 * vmw_shader_key - Compute a hash key suitable for a compat shader.
533 * 916 *
534 * @user_key: User space id of the shader. 917 * @user_key: User space id of the shader.
535 * @shader_type: Shader type. 918 * @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
537 * Returns a hash key suitable for a command buffer managed resource 920 * Returns a hash key suitable for a command buffer managed resource
538 * manager hash table. 921 * manager hash table.
539 */ 922 */
540static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) 923static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
541{ 924{
542 return user_key | (shader_type << 20); 925 return user_key | (shader_type << 20);
543} 926}
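
The key layout explains the bounds enforced by vmw_shader_id_ok() above: the user key occupies the low 20 bits and the shader type sits above them, so user_key must stay below 1 << 20 and shader_type below 16. A standalone pack/unpack sketch:

#include <assert.h>
#include <stdint.h>

#define USER_KEY_BITS 20

static uint32_t pack_key(uint32_t user_key, uint32_t shader_type)
{
	assert(user_key < (1u << USER_KEY_BITS) && shader_type < 16);
	return user_key | (shader_type << USER_KEY_BITS);
}

static void unpack_key(uint32_t key, uint32_t *user_key,
		       uint32_t *shader_type)
{
	*user_key = key & ((1u << USER_KEY_BITS) - 1);
	*shader_type = key >> USER_KEY_BITS;
}
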
544 927
545/** 928/**
546 * vmw_compat_shader_remove - Stage a compat shader for removal. 929 * vmw_shader_remove - Stage a compat shader for removal.
547 * 930 *
548 * @man: Pointer to the compat shader manager identifying the shader namespace. 931 * @man: Pointer to the compat shader manager identifying the shader namespace.
549 * @user_key: The key that is used to identify the shader. The key is 932 * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
551 * @shader_type: Shader type. 934 * @shader_type: Shader type.
552 * @list: Caller's list of staged command buffer resource actions. 935 * @list: Caller's list of staged command buffer resource actions.
553 */ 936 */
554int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, 937int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
555 u32 user_key, SVGA3dShaderType shader_type, 938 u32 user_key, SVGA3dShaderType shader_type,
556 struct list_head *list) 939 struct list_head *list)
557{ 940{
558 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 941 struct vmw_resource *dummy;
942
943 if (!vmw_shader_id_ok(user_key, shader_type))
559 return -EINVAL; 944 return -EINVAL;
560 945
561 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, 946 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
562 vmw_compat_shader_key(user_key, 947 vmw_shader_key(user_key, shader_type),
563 shader_type), 948 list, &dummy);
564 list);
565} 949}
566 950
567/** 951/**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
591 int ret; 975 int ret;
592 struct vmw_resource *res; 976 struct vmw_resource *res;
593 977
594 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 978 if (!vmw_shader_id_ok(user_key, shader_type))
595 return -EINVAL; 979 return -EINVAL;
596 980
597 /* Allocate and pin a DMA buffer */ 981 /* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
628 if (unlikely(ret != 0)) 1012 if (unlikely(ret != 0))
629 goto no_reserve; 1013 goto no_reserve;
630 1014
631 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, 1015 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
632 vmw_compat_shader_key(user_key, shader_type), 1016 vmw_shader_key(user_key, shader_type),
633 res, list); 1017 res, list);
634 vmw_resource_unreference(&res); 1018 vmw_resource_unreference(&res);
635no_reserve: 1019no_reserve:
@@ -639,7 +1023,7 @@ out:
639} 1023}
640 1024
641/** 1025/**
642 * vmw_compat_shader_lookup - Look up a compat shader 1026 * vmw_shader_lookup - Look up a compat shader
643 * 1027 *
644 * @man: Pointer to the command buffer managed resource manager identifying 1028 * @man: Pointer to the command buffer managed resource manager identifying
645 * the shader namespace. 1029 * the shader namespace.
@@ -650,14 +1034,26 @@ out:
650 * found. An error pointer otherwise. 1034 * found. An error pointer otherwise.
651 */ 1035 */
652struct vmw_resource * 1036struct vmw_resource *
653vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, 1037vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
654 u32 user_key, 1038 u32 user_key,
655 SVGA3dShaderType shader_type) 1039 SVGA3dShaderType shader_type)
656{ 1040{
657 if (!vmw_compat_shader_id_ok(user_key, shader_type)) 1041 if (!vmw_shader_id_ok(user_key, shader_type))
658 return ERR_PTR(-EINVAL); 1042 return ERR_PTR(-EINVAL);
659 1043
660 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, 1044 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
661 vmw_compat_shader_key(user_key, 1045 vmw_shader_key(user_key, shader_type));
662 shader_type)); 1046}
1047
1048int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1049 struct drm_file *file_priv)
1050{
1051 struct drm_vmw_shader_create_arg *arg =
1052 (struct drm_vmw_shader_create_arg *)data;
1053
1054 return vmw_shader_define(dev, file_priv, arg->shader_type,
1055 arg->buffer_handle,
1056 arg->size, arg->offset,
1057 0, 0,
1058 &arg->shader_handle);
663} 1059}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 000000000000..5a73eebd0f35
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
1/**************************************************************************
2 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27#include "vmwgfx_drv.h"
28#include "vmwgfx_resource_priv.h"
29#include "vmwgfx_so.h"
30#include "vmwgfx_binding.h"
31
32/*
33 * Currently, the only reason we need to keep track of views is that if we
34 * destroy a hardware surface, all views pointing to it must also be destroyed;
35 * otherwise the device will error.
36 * So in particular, if a surface is evicted, we must destroy all views pointing
37 * to it, and all context bindings of those views. Similarly, we must restore
38 * the view bindings, views and surfaces pointed to by the views when a
39 * context is referenced in the command stream.
40 */
41
42/**
43 * struct vmw_view - view metadata
44 *
45 * @res: The struct vmw_resource we derive from
46 * @ctx: Non-refcounted pointer to the context this view belongs to.
47 * @srf: Refcounted pointer to the surface pointed to by this view.
48 * @cotable: Refcounted pointer to the cotable holding this view.
49 * @srf_head: List head for the surface-to-view list.
50 * @cotable_head: List head for the cotable-to-view list.
51 * @view_type: View type.
52 * @view_id: User-space per context view id. Currently used also as per
53 * context device view id.
54 * @cmd_size: Size of the SVGA3D define view command that we've copied from the
55 * command stream.
56 * @committed: Whether the view is actually created or pending creation at the
57 * device level.
58 * @cmd: The SVGA3D define view command copied from the command stream.
59 */
60struct vmw_view {
61 struct rcu_head rcu;
62 struct vmw_resource res;
63 struct vmw_resource *ctx; /* Immutable */
64 struct vmw_resource *srf; /* Immutable */
65 struct vmw_resource *cotable; /* Immutable */
66 struct list_head srf_head; /* Protected by binding_mutex */
67 struct list_head cotable_head; /* Protected by binding_mutex */
68 unsigned view_type; /* Immutable */
69 unsigned view_id; /* Immutable */
70 u32 cmd_size; /* Immutable */
71 bool committed; /* Protected by binding_mutex */
72 u32 cmd[1]; /* Immutable */
73};
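
The trailing cmd[1] member is the classic inline-blob idiom: the define command copied from the command stream is stored directly after the struct, and vmw_view_add() later sizes the allocation with offsetof(struct vmw_view, cmd) + cmd_size. A minimal sketch of the same idiom; a C99 flexible array member would work equally well:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	uint32_t size;
	uint32_t data[1];	/* really 'size' bytes, allocated inline */
};

static struct blob *blob_dup(const void *src, uint32_t size)
{
	/* offsetof(), not sizeof(): the one-element array is only a stub. */
	struct blob *b = malloc(offsetof(struct blob, data) + size);

	if (!b)
		return NULL;
	b->size = size;
	memcpy(b->data, src, size);
	return b;
}
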
74
75static int vmw_view_create(struct vmw_resource *res);
76static int vmw_view_destroy(struct vmw_resource *res);
77static void vmw_hw_view_destroy(struct vmw_resource *res);
78static void vmw_view_commit_notify(struct vmw_resource *res,
79 enum vmw_cmdbuf_res_state state);
80
81static const struct vmw_res_func vmw_view_func = {
82 .res_type = vmw_res_view,
83 .needs_backup = false,
84 .may_evict = false,
85 .type_name = "DX view",
86 .backup_placement = NULL,
87 .create = vmw_view_create,
88 .commit_notify = vmw_view_commit_notify,
89};
90
91/**
92 * struct vmw_view_define - view define command body stub
93 *
94 * @view_id: The device id of the view being defined
95 * @sid: The surface id of the view being defined
96 *
97 * This generic struct is used by the code to change @view_id and @sid of a
98 * saved view define command.
99 */
100struct vmw_view_define {
101 uint32 view_id;
102 uint32 sid;
103};
104
105/**
106 * vmw_view - Convert a struct vmw_resource to a struct vmw_view
107 *
108 * @res: Pointer to the resource to convert.
109 *
110 * Returns a pointer to a struct vmw_view.
111 */
112static struct vmw_view *vmw_view(struct vmw_resource *res)
113{
114 return container_of(res, struct vmw_view, res);
115}
116
117/**
118 * vmw_view_commit_notify - Notify that a view operation has been committed to
119 * hardware from a user-supplied command stream.
120 *
121 * @res: Pointer to the view resource.
122 * @state: Indicating whether a creation or removal has been committed.
123 *
124 */
125static void vmw_view_commit_notify(struct vmw_resource *res,
126 enum vmw_cmdbuf_res_state state)
127{
128 struct vmw_view *view = vmw_view(res);
129 struct vmw_private *dev_priv = res->dev_priv;
130
131 mutex_lock(&dev_priv->binding_mutex);
132 if (state == VMW_CMDBUF_RES_ADD) {
133 struct vmw_surface *srf = vmw_res_to_srf(view->srf);
134
135 list_add_tail(&view->srf_head, &srf->view_list);
136 vmw_cotable_add_resource(view->cotable, &view->cotable_head);
137 view->committed = true;
138 res->id = view->view_id;
139
140 } else {
141 list_del_init(&view->cotable_head);
142 list_del_init(&view->srf_head);
143 view->committed = false;
144 res->id = -1;
145 }
146 mutex_unlock(&dev_priv->binding_mutex);
147}
148
149/**
150 * vmw_view_create - Create a hardware view.
151 *
152 * @res: Pointer to the view resource.
153 *
154 * Create a hardware view. Typically used if that view has previously been
155 * destroyed by an eviction operation.
156 */
157static int vmw_view_create(struct vmw_resource *res)
158{
159 struct vmw_view *view = vmw_view(res);
160 struct vmw_surface *srf = vmw_res_to_srf(view->srf);
161 struct vmw_private *dev_priv = res->dev_priv;
162 struct {
163 SVGA3dCmdHeader header;
164 struct vmw_view_define body;
165 } *cmd;
166
167 mutex_lock(&dev_priv->binding_mutex);
168 if (!view->committed) {
169 mutex_unlock(&dev_priv->binding_mutex);
170 return 0;
171 }
172
173 cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
174 view->ctx->id);
175 if (!cmd) {
176 DRM_ERROR("Failed reserving FIFO space for view creation.\n");
177 mutex_unlock(&dev_priv->binding_mutex);
178 return -ENOMEM;
179 }
180 memcpy(cmd, &view->cmd, view->cmd_size);
181 WARN_ON(cmd->body.view_id != view->view_id);
182 /* Sid may have changed due to surface eviction. */
183 WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
184 cmd->body.sid = view->srf->id;
185 vmw_fifo_commit(res->dev_priv, view->cmd_size);
186 res->id = view->view_id;
187 list_add_tail(&view->srf_head, &srf->view_list);
188 vmw_cotable_add_resource(view->cotable, &view->cotable_head);
189 mutex_unlock(&dev_priv->binding_mutex);
190
191 return 0;
192}
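
vmw_view_create() replays the define command captured at view-creation time; since the backing surface may have been evicted and recreated, only the sid field is refreshed before the copy is committed. A reduced sketch of patching a saved command blob through the generic stub, with illustrative types:

#include <stdint.h>
#include <string.h>

struct view_define_stub {	/* models struct vmw_view_define */
	uint32_t view_id;
	uint32_t sid;
};

/* Copy the saved command into reserved command space, then refresh the
 * surface id, which may have changed since the command was captured. */
static void replay_define(void *fifo, const void *saved, size_t cmd_size,
			  size_t header_size, uint32_t current_sid)
{
	struct view_define_stub *body =
		(struct view_define_stub *)((char *)fifo + header_size);

	memcpy(fifo, saved, cmd_size);
	body->sid = current_sid;
}
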
193
194/**
195 * vmw_view_destroy - Destroy a hardware view.
196 *
197 * @res: Pointer to the view resource.
198 *
199 * Destroy a hardware view. Typically used on unexpected termination of the
200 * owning process or if the surface the view is pointing to is destroyed.
201 */
202static int vmw_view_destroy(struct vmw_resource *res)
203{
204 struct vmw_private *dev_priv = res->dev_priv;
205 struct vmw_view *view = vmw_view(res);
206 struct {
207 SVGA3dCmdHeader header;
208 union vmw_view_destroy body;
209 } *cmd;
210
211 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
212 vmw_binding_res_list_scrub(&res->binding_head);
213
214 if (!view->committed || res->id == -1)
215 return 0;
216
217 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
218 if (!cmd) {
219 DRM_ERROR("Failed reserving FIFO space for view "
220 "destruction.\n");
221 return -ENOMEM;
222 }
223
224 cmd->header.id = vmw_view_destroy_cmds[view->view_type];
225 cmd->header.size = sizeof(cmd->body);
226 cmd->body.view_id = view->view_id;
227 vmw_fifo_commit(dev_priv, sizeof(*cmd));
228 res->id = -1;
229 list_del_init(&view->cotable_head);
230 list_del_init(&view->srf_head);
231
232 return 0;
233}
234
235/**
236 * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
237 *
238 * @res: Pointer to the view resource.
239 *
240 * Destroy a hardware view if it's still present.
241 */
242static void vmw_hw_view_destroy(struct vmw_resource *res)
243{
244 struct vmw_private *dev_priv = res->dev_priv;
245
246 mutex_lock(&dev_priv->binding_mutex);
247 WARN_ON(vmw_view_destroy(res));
248 res->id = -1;
249 mutex_unlock(&dev_priv->binding_mutex);
250}
251
252/**
253 * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
254 *
255 * @user_key: The user-space id used for the view.
256 * @view_type: The view type.
257 *
258 * Returns a hash key suitable for the command buffer managed resource manager hash table.
259 */
260static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
261{
262 return user_key | (view_type << 20);
263}
264
265/**
266 * vmw_view_id_ok - Basic view id and type range checks.
267 *
268 * @user_key: The user-space id used for the view.
269 * @view_type: The view type.
270 *
271 * Checks that the view id and type (typically provided by user-space) are
272 * valid.
273 */
274static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
275{
276 return (user_key < SVGA_COTABLE_MAX_IDS &&
277 view_type < vmw_view_max);
278}
279
280/**
281 * vmw_view_res_free - resource res_free callback for view resources
282 *
283 * @res: Pointer to a struct vmw_resource
284 *
285 * Frees memory and memory accounting held by a struct vmw_view.
286 */
287static void vmw_view_res_free(struct vmw_resource *res)
288{
289 struct vmw_view *view = vmw_view(res);
290 size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
291 struct vmw_private *dev_priv = res->dev_priv;
292
293 vmw_resource_unreference(&view->cotable);
294 vmw_resource_unreference(&view->srf);
295 kfree_rcu(view, rcu);
296 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
297}
298
299/**
300 * vmw_view_add - Create a view resource and stage it for addition
301 * as a command buffer managed resource.
302 *
303 * @man: Pointer to the cmdbuf resource manager identifying the view namespace.
304 * @ctx: Pointer to a struct vmw_resource identifying the active context.
305 * @srf: Pointer to a struct vmw_resource identifying the surface the view
306 * points to.
307 * @view_type: The view type deduced from the view create command.
308 * @user_key: The key that is used to identify the view. The key is
309 * unique to the view type and to the context.
310 * @cmd: Pointer to the view create command in the command stream.
311 * @cmd_size: Size of the view create command in the command stream.
312 * @list: Caller's list of staged command buffer resource actions.
313 */
314int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
315 struct vmw_resource *ctx,
316 struct vmw_resource *srf,
317 enum vmw_view_type view_type,
318 u32 user_key,
319 const void *cmd,
320 size_t cmd_size,
321 struct list_head *list)
322{
323 static const size_t vmw_view_define_sizes[] = {
324 [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
325 [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
326 [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
327 };
328
329 struct vmw_private *dev_priv = ctx->dev_priv;
330 struct vmw_resource *res;
331 struct vmw_view *view;
332 size_t size;
333 int ret;
334
335 if (cmd_size != vmw_view_define_sizes[view_type] +
336 sizeof(SVGA3dCmdHeader)) {
337 DRM_ERROR("Illegal view create command size.\n");
338 return -EINVAL;
339 }
340
341 if (!vmw_view_id_ok(user_key, view_type)) {
342 DRM_ERROR("Illegal view add view id.\n");
343 return -EINVAL;
344 }
345
346 size = offsetof(struct vmw_view, cmd) + cmd_size;
347
348 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
349 if (ret) {
350 if (ret != -ERESTARTSYS)
351 DRM_ERROR("Out of graphics memory for view"
352 " creation.\n");
353 return ret;
354 }
355
356 view = kmalloc(size, GFP_KERNEL);
357 if (!view) {
358 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
359 return -ENOMEM;
360 }
361
362 res = &view->res;
363 view->ctx = ctx;
364 view->srf = vmw_resource_reference(srf);
365 view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
366 view->view_type = view_type;
367 view->view_id = user_key;
368 view->cmd_size = cmd_size;
369 view->committed = false;
370 INIT_LIST_HEAD(&view->srf_head);
371 INIT_LIST_HEAD(&view->cotable_head);
372 memcpy(&view->cmd, cmd, cmd_size);
373 ret = vmw_resource_init(dev_priv, res, true,
374 vmw_view_res_free, &vmw_view_func);
375 if (ret)
376 goto out_resource_init;
377
378 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
379 vmw_view_key(user_key, view_type),
380 res, list);
381 if (ret)
382 goto out_resource_init;
383
384 res->id = view->view_id;
385 vmw_resource_activate(res, vmw_hw_view_destroy);
386
387out_resource_init:
388 vmw_resource_unreference(&res);
389
390 return ret;
391}
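
A rough sketch of how the DX command parser could stage a shader resource view with this function while validating a view define command; the local names here (man, ctx_res, srf_res, view_id, header, staged) are hypothetical parser state, and the command pointer covers the full command including its header, matching the size check above:

    /* Sketch only: stage a shader-resource view during command validation. */
    ret = vmw_view_add(man, ctx_res, srf_res, vmw_view_sr,
                       view_id,                           /* user_key */
                       header,                            /* create command */
                       header->size + sizeof(*header),    /* cmd_size */
                       &staged);
    if (ret)
        return ret;    /* view not staged; nothing to unwind here */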
392
393/**
394 * vmw_view_remove - Stage a view for removal.
395 *
396 * @man: Pointer to the view manager identifying the view namespace.
397 * @user_key: The key that is used to identify the view. The key is
398 * unique to the view type.
399 * @view_type: View type
400 * @list: Caller's list of staged command buffer resource actions.
401 * @res_p: If the resource is in an already committed state, points to the
402 * struct vmw_resource on successful return. The pointer will be
403 * non ref-counted.
404 */
405int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
406 u32 user_key, enum vmw_view_type view_type,
407 struct list_head *list,
408 struct vmw_resource **res_p)
409{
410 if (!vmw_view_id_ok(user_key, view_type)) {
411 DRM_ERROR("Illegal view remove view id.\n");
412 return -EINVAL;
413 }
414
415 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
416 vmw_view_key(user_key, view_type),
417 list, res_p);
418}
419
420/**
421 * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
422 *
423 * @dev_priv: Pointer to a device private struct.
424 * @list: List of views belonging to a cotable.
425 * @readback: Unused. Needed for function interface only.
426 *
427 * This function evicts all views belonging to a cotable.
428 * It must be called with the binding_mutex held, and the caller must hold
429 * a reference to the view resource. This is typically called before the
430 * cotable is paged out.
431 */
432void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
433 struct list_head *list,
434 bool readback)
435{
436 struct vmw_view *entry, *next;
437
438 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
439
440 list_for_each_entry_safe(entry, next, list, cotable_head)
441 WARN_ON(vmw_view_destroy(&entry->res));
442}
443
444/**
445 * vmw_view_surface_list_destroy - Evict all views pointing to a surface
446 *
447 * @dev_priv: Pointer to a device private struct.
448 * @list: List of views pointing to a surface.
449 *
450 * This function evicts all views pointing to a surface. This is typically
451 * called before the surface is evicted.
452 */
453void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
454 struct list_head *list)
455{
456 struct vmw_view *entry, *next;
457
458 WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
459
460 list_for_each_entry_safe(entry, next, list, srf_head)
461 WARN_ON(vmw_view_destroy(&entry->res));
462}
463
464/**
465 * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
466 * pointing to.
467 *
468 * @res: pointer to a view resource.
469 *
470 * Note that the view itself is holding a reference, so as long as
471 * the view resource is alive, the surface resource will be.
472 */
473struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
474{
475 return vmw_view(res)->srf;
476}
477
478/**
479 * vmw_view_lookup - Look up a view.
480 *
481 * @man: The context's cmdbuf ref manager.
482 * @view_type: The view type.
483 * @user_key: The view user id.
484 *
485 * Returns a ref-counted pointer to a view, or an error pointer if not found.
486 */
487struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
488 enum vmw_view_type view_type,
489 u32 user_key)
490{
491 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
492 vmw_view_key(user_key, view_type));
493}
494
495const u32 vmw_view_destroy_cmds[] = {
496 [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
497 [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
498 [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
499};
500
501const SVGACOTableType vmw_view_cotables[] = {
502 [vmw_view_sr] = SVGA_COTABLE_SRVIEW,
503 [vmw_view_rt] = SVGA_COTABLE_RTVIEW,
504 [vmw_view_ds] = SVGA_COTABLE_DSVIEW,
505};
506
507const SVGACOTableType vmw_so_cotables[] = {
508 [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
509 [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
510 [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
511 [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
512 [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
513 [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
514};
515
516
517/* To remove unused function warning */
518static void vmw_so_build_asserts(void) __attribute__((used));
519
520
521/*
522 * This function is unused at run-time, and is only used to hold various build
523 * asserts important for code optimization assumptions.
524 */
525static void vmw_so_build_asserts(void)
526{
527 /* Assert that our vmw_view_cmd_to_type() function is correct. */
528 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
529 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
530 BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
531 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
532 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
533 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
534 BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
535 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
536 BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
537 SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
538
539 /* Assert that our "one body fits all" assumption is valid */
540 BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
541
542 /* Assert that the view key space can hold all view ids. */
543 BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
544
545 /*
546 * Assert that the offset of sid in all view define commands
547 * is what we assume it to be.
548 */
549 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
550 offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
551 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
552 offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
553 BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
554 offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
555}
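
BUILD_BUG_ON() fails the compile when its condition is true, so all of the checks above cost nothing at run time. One classic, simplified form of the macro, shown only to illustrate the mechanism (the kernel's real definition is more elaborate):

    /* Simplified sketch: a true condition produces a negative array size. */
    #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))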
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 000000000000..268738387b5e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
1/**************************************************************************
2 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26#ifndef VMW_SO_H
27#define VMW_SO_H
28
29enum vmw_view_type {
30 vmw_view_sr,
31 vmw_view_rt,
32 vmw_view_ds,
33 vmw_view_max,
34};
35
36enum vmw_so_type {
37 vmw_so_el,
38 vmw_so_bs,
39 vmw_so_ds,
40 vmw_so_rs,
41 vmw_so_ss,
42 vmw_so_so,
43 vmw_so_max,
44};
45
46/**
47 * union vmw_view_destroy - view destruction command body
48 *
49 * @rtv: RenderTarget view destruction command body
50 * @srv: ShaderResource view destruction command body
51 * @dsv: DepthStencil view destruction command body
52 * @view_id: A single u32 view id.
53 *
54 * The assumption here is that all union members are really represented by a
55 * single u32 in the command stream. If that's not the case,
56 * the size of this union will not equal the size of a u32; the assumption
57 * is then invalid, and we detect that at compile time in the
58 * vmw_so_build_asserts() function.
59 */
60union vmw_view_destroy {
61 struct SVGA3dCmdDXDestroyRenderTargetView rtv;
62 struct SVGA3dCmdDXDestroyShaderResourceView srv;
63 struct SVGA3dCmdDXDestroyDepthStencilView dsv;
64 u32 view_id;
65};
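
Because every member is really just the u32 view id, a destroy command body for any view type can be written through the union without switching on the type, which is what vmw_view_destroy() in vmwgfx_so.c relies on; a sketch (view_id is a stand-in local):

    union vmw_view_destroy body;

    body.view_id = view_id;    /* valid for rtv, srv and dsv alike */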
66
67/* Map enum vmw_view_type to view destroy command ids*/
68extern const u32 vmw_view_destroy_cmds[];
69
70/* Map enum vmw_view_type to SVGACOTableType */
71extern const SVGACOTableType vmw_view_cotables[];
72
73/* Map enum vmw_so_type to SVGACOTableType */
74extern const SVGACOTableType vmw_so_cotables[];
75
76/*
77 * vmw_view_cmd_to_type - Return the view type for a create or destroy command
78 *
79 * @id: The SVGA3D command id.
80 *
81 * For a given view create or destroy command id, return the corresponding
82 * enum vmw_view_type. If the command is unknown, return vmw_view_max.
83 * The validity of the simplified calculation is verified in the
84 * vmw_so_build_asserts() function.
85 */
86static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
87{
88 u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
89
90 if (tmp > (u32)vmw_view_max)
91 return vmw_view_max;
92
93 return (enum vmw_view_type) tmp;
94}
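
The arithmetic works because the six view commands form consecutive define/destroy pairs, which the build asserts in vmwgfx_so.c verify; a worked example of the mapping:

    /*
     * Offset from SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW:
     *   0, 1 (define/destroy SR view) -> 0/2, 1/2 = 0 -> vmw_view_sr
     *   2, 3 (define/destroy RT view) -> 2/2, 3/2 = 1 -> vmw_view_rt
     *   4, 5 (define/destroy DS view) -> 4/2, 5/2 = 2 -> vmw_view_ds
     *   anything else                 -> tmp >= 3     -> vmw_view_max
     */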
95
96/*
97 * vmw_so_cmd_to_type - Return the state object type for a
98 * create or destroy command
99 *
100 * @id: The SVGA3D command id.
101 *
102 * For a given state object create or destroy command id,
103 * return the corresponding enum vmw_so_type. If the command is unknown,
104 * return vmw_so_max. We should perhaps optimize this function using
105 * a similar strategy as vmw_view_cmd_to_type().
106 */
107static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
108{
109 switch (id) {
110 case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
111 case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
112 return vmw_so_el;
113 case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
114 case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
115 return vmw_so_bs;
116 case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
117 case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
118 return vmw_so_ds;
119 case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
120 case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
121 return vmw_so_rs;
122 case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
123 case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
124 return vmw_so_ss;
125 case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
126 case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
127 return vmw_so_so;
128 default:
129 break;
130 }
131 return vmw_so_max;
132}
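
A sketch of the optimization hinted at above, under the unverified assumption that the define/destroy ids of all six state-object types form consecutive pairs starting at SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT; build asserts like the view variant's, checked against the device headers, would be needed before anything like this could replace the switch:

    /* Hypothetical only; the pairing assumption must be verified first. */
    static inline enum vmw_so_type vmw_so_cmd_to_type_fast(u32 id)
    {
        u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT) / 2;

        return (tmp < (u32)vmw_so_max) ?
            (enum vmw_so_type)tmp : vmw_so_max;
    }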
133
134/*
135 * View management - vmwgfx_so.c
136 */
137extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
138 struct vmw_resource *ctx,
139 struct vmw_resource *srf,
140 enum vmw_view_type view_type,
141 u32 user_key,
142 const void *cmd,
143 size_t cmd_size,
144 struct list_head *list);
145
146extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
147 u32 user_key, enum vmw_view_type view_type,
148 struct list_head *list,
149 struct vmw_resource **res_p);
150
151extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
152 struct list_head *view_list);
153extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
154 struct list_head *list,
155 bool readback);
156extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
157extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
158 enum vmw_view_type view_type,
159 u32 user_key);
160#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 000000000000..c22e2df1b336
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
1/******************************************************************************
2 *
3 * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 ******************************************************************************/
27
28#include "vmwgfx_kms.h"
29#include "device_include/svga3d_surfacedefs.h"
30#include <drm/drm_plane_helper.h>
31
32#define vmw_crtc_to_stdu(x) \
33 container_of(x, struct vmw_screen_target_display_unit, base.crtc)
34#define vmw_encoder_to_stdu(x) \
35 container_of(x, struct vmw_screen_target_display_unit, base.encoder)
36#define vmw_connector_to_stdu(x) \
37 container_of(x, struct vmw_screen_target_display_unit, base.connector)
38
39
40
41enum stdu_content_type {
42 SAME_AS_DISPLAY = 0,
43 SEPARATE_SURFACE,
44 SEPARATE_DMA
45};
46
47/**
48 * struct vmw_stdu_dirty - closure structure for the update functions
49 *
50 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
51 * @transfer: Transfer direction for DMA command.
52 * @left: Left side of bounding box.
53 * @right: Right side of bounding box.
54 * @top: Top side of bounding box.
55 * @bottom: Bottom side of bounding box.
56 * @buf: DMA buffer when DMA-ing between buffer and screen targets.
57 * @sid: Surface ID when copying between surface and screen targets.
58 */
59struct vmw_stdu_dirty {
60 struct vmw_kms_dirty base;
61 SVGA3dTransferType transfer;
62 s32 left, right, top, bottom;
63 u32 pitch;
64 union {
65 struct vmw_dma_buffer *buf;
66 u32 sid;
67 };
68};
69
70/*
71 * SVGA commands that are used by this code. Please see the device headers
72 * for explanation.
73 */
74struct vmw_stdu_update {
75 SVGA3dCmdHeader header;
76 SVGA3dCmdUpdateGBScreenTarget body;
77};
78
79struct vmw_stdu_dma {
80 SVGA3dCmdHeader header;
81 SVGA3dCmdSurfaceDMA body;
82};
83
84struct vmw_stdu_surface_copy {
85 SVGA3dCmdHeader header;
86 SVGA3dCmdSurfaceCopy body;
87};
88
89
90/**
91 * struct vmw_screen_target_display_unit
92 *
93 * @base: VMW specific DU structure
94 * @display_srf: surface to be displayed. The dimensions of this will always
95 * match the display mode. If the display mode matches
96 * content_vfbs dimensions, then this is a pointer into the
97 * corresponding field in content_vfbs. If not, then this
98 * is a separate buffer that content_vfbs will blit to.
99 * @content_fb: holds the rendered content, can be a surface or DMA buffer
100 * @content_fb_type: content_fb type
101 * @defined: true if the current display unit has been initialized
102 */
103struct vmw_screen_target_display_unit {
104 struct vmw_display_unit base;
105
106 struct vmw_surface *display_srf;
107 struct drm_framebuffer *content_fb;
108
109 enum stdu_content_type content_fb_type;
110
111 bool defined;
112};
113
114
115
116static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
117
118
119
120/******************************************************************************
121 * Screen Target Display Unit helper Functions
122 *****************************************************************************/
123
124/**
125 * vmw_stdu_pin_display - pins the resource associated with the display surface
126 *
127 * @stdu: contains the display surface
128 *
129 * Since the display surface can either be a private surface allocated by us,
130 * or it can point to the content surface, we use this function to not pin the
131 * same resource twice.
132 */
133static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
134{
135 return vmw_resource_pin(&stdu->display_srf->res, false);
136}
137
138
139
140/**
141 * vmw_stdu_unpin_display - unpins the resource associated with display surface
142 *
143 * @stdu: contains the display surface
144 *
145 * If the display surface was privately allocated by
146 * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
147 * won't be automatically cleaned up when all the framebuffers are freed. As
148 * such, we have to explicitly call vmw_resource_unreference() to get it freed.
149 */
150static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
151{
152 if (stdu->display_srf) {
153 struct vmw_resource *res = &stdu->display_srf->res;
154
155 vmw_resource_unpin(res);
156
157 if (stdu->content_fb_type != SAME_AS_DISPLAY) {
158 vmw_resource_unreference(&res);
159 stdu->content_fb_type = SAME_AS_DISPLAY;
160 }
161
162 stdu->display_srf = NULL;
163 }
164}
165
166
167
168/******************************************************************************
169 * Screen Target Display Unit CRTC Functions
170 *****************************************************************************/
171
172
173/**
174 * vmw_stdu_crtc_destroy - cleans up the STDU
175 *
176 * @crtc: used to get a reference to the containing STDU
177 */
178static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
179{
180 vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
181}
182
183/**
184 * vmw_stdu_define_st - Defines a Screen Target
185 *
186 * @dev_priv: VMW DRM device
187 * @stdu: display unit to create a Screen Target for
188 *
189 * Defines a Screen Target that we can use later. This function is called whenever the
190 * framebuffer size changes.
191 *
192 * RETURNS:
193 * 0 on success, error code on failure
194 */
195static int vmw_stdu_define_st(struct vmw_private *dev_priv,
196 struct vmw_screen_target_display_unit *stdu)
197{
198 struct {
199 SVGA3dCmdHeader header;
200 SVGA3dCmdDefineGBScreenTarget body;
201 } *cmd;
202
203 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
204
205 if (unlikely(cmd == NULL)) {
206 DRM_ERROR("Out of FIFO space defining Screen Target\n");
207 return -ENOMEM;
208 }
209
210 cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
211 cmd->header.size = sizeof(cmd->body);
212
213 cmd->body.stid = stdu->base.unit;
214 cmd->body.width = stdu->display_srf->base_size.width;
215 cmd->body.height = stdu->display_srf->base_size.height;
216 cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
217 cmd->body.dpi = 0;
218 cmd->body.xRoot = stdu->base.crtc.x;
219 cmd->body.yRoot = stdu->base.crtc.y;
220
221 if (!stdu->base.is_implicit) {
222 cmd->body.xRoot = stdu->base.gui_x;
223 cmd->body.yRoot = stdu->base.gui_y;
224 }
225
226 vmw_fifo_commit(dev_priv, sizeof(*cmd));
227
228 stdu->defined = true;
229
230 return 0;
231}
232
233
234
235/**
236 * vmw_stdu_bind_st - Binds a surface to a Screen Target
237 *
238 * @dev_priv: VMW DRM device
239 * @stdu: display unit affected
240 * @res: Buffer to bind to the screen target. Set to NULL to blank screen.
241 *
242 * Binding a surface to a Screen Target is the same as flipping.
243 */
244static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
245 struct vmw_screen_target_display_unit *stdu,
246 struct vmw_resource *res)
247{
248 SVGA3dSurfaceImageId image;
249
250 struct {
251 SVGA3dCmdHeader header;
252 SVGA3dCmdBindGBScreenTarget body;
253 } *cmd;
254
255
256 if (!stdu->defined) {
257 DRM_ERROR("No screen target defined\n");
258 return -EINVAL;
259 }
260
261 /* Set up image using information in vfb */
262 memset(&image, 0, sizeof(image));
263 image.sid = res ? res->id : SVGA3D_INVALID_ID;
264
265 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
266
267 if (unlikely(cmd == NULL)) {
268 DRM_ERROR("Out of FIFO space binding a screen target\n");
269 return -ENOMEM;
270 }
271
272 cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
273 cmd->header.size = sizeof(cmd->body);
274
275 cmd->body.stid = stdu->base.unit;
276 cmd->body.image = image;
277
278 vmw_fifo_commit(dev_priv, sizeof(*cmd));
279
280 return 0;
281}
282
283/**
284 * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
285 * bounding box.
286 *
287 * @cmd: Pointer to command stream.
288 * @unit: Screen target unit.
289 * @left: Left side of bounding box.
290 * @right: Right side of bounding box.
291 * @top: Top side of bounding box.
292 * @bottom: Bottom side of bounding box.
293 */
294static void vmw_stdu_populate_update(void *cmd, int unit,
295 s32 left, s32 right, s32 top, s32 bottom)
296{
297 struct vmw_stdu_update *update = cmd;
298
299 update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
300 update->header.size = sizeof(update->body);
301
302 update->body.stid = unit;
303 update->body.rect.x = left;
304 update->body.rect.y = top;
305 update->body.rect.w = right - left;
306 update->body.rect.h = bottom - top;
307}
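
The helper turns a left/right/top/bottom bounding box into the x/y/w/h rectangle the device expects. For example, a full update of a 1024x768 screen target 0 reduces to (a sketch, with a stack variable standing in for reserved FIFO space):

    struct vmw_stdu_update cmd;

    vmw_stdu_populate_update(&cmd, 0, 0, 1024, 0, 768);
    /* cmd.body.rect now holds x = 0, y = 0, w = 1024, h = 768. */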
308
309/**
310 * vmw_stdu_update_st - Full update of a Screen Target
311 *
312 * @dev_priv: VMW DRM device
313 * @stdu: display unit affected
314 *
315 * This function needs to be called whenever the content of a screen
316 * target has changed completely. Typically as a result of a backing
317 * surface change.
318 *
319 * RETURNS:
320 * 0 on success, error code on failure
321 */
322static int vmw_stdu_update_st(struct vmw_private *dev_priv,
323 struct vmw_screen_target_display_unit *stdu)
324{
325 struct vmw_stdu_update *cmd;
326 struct drm_crtc *crtc = &stdu->base.crtc;
327
328 if (!stdu->defined) {
329		DRM_ERROR("No screen target defined\n");
330 return -EINVAL;
331 }
332
333 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
334
335 if (unlikely(cmd == NULL)) {
336 DRM_ERROR("Out of FIFO space updating a Screen Target\n");
337 return -ENOMEM;
338 }
339
340 vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
341 0, crtc->mode.vdisplay);
342
343 vmw_fifo_commit(dev_priv, sizeof(*cmd));
344
345 return 0;
346}
347
348
349
350/**
351 * vmw_stdu_destroy_st - Destroy a Screen Target
352 *
353 * @dev_priv: VMW DRM device
354 * @stdu: display unit to destroy
355 */
356static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
357 struct vmw_screen_target_display_unit *stdu)
358{
359 int ret;
360
361 struct {
362 SVGA3dCmdHeader header;
363 SVGA3dCmdDestroyGBScreenTarget body;
364 } *cmd;
365
366
367 /* Nothing to do if not successfully defined */
368 if (unlikely(!stdu->defined))
369 return 0;
370
371 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
372
373 if (unlikely(cmd == NULL)) {
374 DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
375 return -ENOMEM;
376 }
377
378 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
379 cmd->header.size = sizeof(cmd->body);
380
381 cmd->body.stid = stdu->base.unit;
382
383 vmw_fifo_commit(dev_priv, sizeof(*cmd));
384
385 /* Force sync */
386 ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
387 if (unlikely(ret != 0))
388		DRM_ERROR("Failed to sync with HW\n");
389
390 stdu->defined = false;
391
392 return ret;
393}
394
395
396
397/**
398 * vmw_stdu_crtc_set_config - Sets a mode
399 *
400 * @set: mode parameters
401 *
402 * This function is the device-specific portion of the DRM CRTC mode set.
403 * For the SVGA device, we do this by defining a Screen Target, binding a
404 * GB Surface to that target, and finally updating the screen target.
405 *
406 * RETURNS:
407 * 0 on success, error code otherwise
408 */
409static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
410{
411 struct vmw_private *dev_priv;
412 struct vmw_screen_target_display_unit *stdu;
413 struct vmw_framebuffer *vfb;
414 struct vmw_framebuffer_surface *new_vfbs;
415 struct drm_display_mode *mode;
416 struct drm_framebuffer *new_fb;
417 struct drm_crtc *crtc;
418 struct drm_encoder *encoder;
419 struct drm_connector *connector;
420 int ret;
421
422
423 if (!set || !set->crtc)
424 return -EINVAL;
425
426 crtc = set->crtc;
427 crtc->x = set->x;
428 crtc->y = set->y;
429 stdu = vmw_crtc_to_stdu(crtc);
430 mode = set->mode;
431 new_fb = set->fb;
432 dev_priv = vmw_priv(crtc->dev);
433
434
435 if (set->num_connectors > 1) {
436 DRM_ERROR("Too many connectors\n");
437 return -EINVAL;
438 }
439
440 if (set->num_connectors == 1 &&
441 set->connectors[0] != &stdu->base.connector) {
442 DRM_ERROR("Connectors don't match %p %p\n",
443 set->connectors[0], &stdu->base.connector);
444 return -EINVAL;
445 }
446
447
448 /* Since they always map one to one these are safe */
449 connector = &stdu->base.connector;
450 encoder = &stdu->base.encoder;
451
452
453 /*
454 * After this point the CRTC will be considered off unless a new fb
455 * is bound
456 */
457 if (stdu->defined) {
458 /* Unbind current surface by binding an invalid one */
459 ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
460 if (unlikely(ret != 0))
461 return ret;
462
463 /* Update Screen Target, display will now be blank */
464 if (crtc->primary->fb) {
465			ret = vmw_stdu_update_st(dev_priv, stdu);
466 if (unlikely(ret != 0))
467 return ret;
468 }
469
470 crtc->primary->fb = NULL;
471 crtc->enabled = false;
472 encoder->crtc = NULL;
473 connector->encoder = NULL;
474
475 vmw_stdu_unpin_display(stdu);
476 stdu->content_fb = NULL;
477 stdu->content_fb_type = SAME_AS_DISPLAY;
478
479 ret = vmw_stdu_destroy_st(dev_priv, stdu);
480 /* The hardware is hung, give up */
481 if (unlikely(ret != 0))
482 return ret;
483 }
484
485
486 /* Any of these conditions means the caller wants CRTC off */
487 if (set->num_connectors == 0 || !mode || !new_fb)
488 return 0;
489
490
491 if (set->x + mode->hdisplay > new_fb->width ||
492 set->y + mode->vdisplay > new_fb->height) {
493 DRM_ERROR("Set outside of framebuffer\n");
494 return -EINVAL;
495 }
496
497 stdu->content_fb = new_fb;
498 vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
499
500 if (vfb->dmabuf)
501 stdu->content_fb_type = SEPARATE_DMA;
502
503 /*
504 * If the requested mode is different than the width and height
505 * of the FB or if the content buffer is a DMA buf, then allocate
506 * a display FB that matches the dimension of the mode
507 */
508 if (mode->hdisplay != new_fb->width ||
509 mode->vdisplay != new_fb->height ||
510 stdu->content_fb_type != SAME_AS_DISPLAY) {
511 struct vmw_surface content_srf;
512 struct drm_vmw_size display_base_size = {0};
513 struct vmw_surface *display_srf;
514
515
516 display_base_size.width = mode->hdisplay;
517 display_base_size.height = mode->vdisplay;
518 display_base_size.depth = 1;
519
520 /*
521 * If content buffer is a DMA buf, then we have to construct
522 * surface info
523 */
524 if (stdu->content_fb_type == SEPARATE_DMA) {
525
526 switch (new_fb->bits_per_pixel) {
527 case 32:
528 content_srf.format = SVGA3D_X8R8G8B8;
529 break;
530
531 case 16:
532 content_srf.format = SVGA3D_R5G6B5;
533 break;
534
535 case 8:
536 content_srf.format = SVGA3D_P8;
537 break;
538
539 default:
540 DRM_ERROR("Invalid format\n");
541 ret = -EINVAL;
542 goto err_unref_content;
543 }
544
545 content_srf.flags = 0;
546 content_srf.mip_levels[0] = 1;
547 content_srf.multisample_count = 0;
548 } else {
549
550 stdu->content_fb_type = SEPARATE_SURFACE;
551
552 new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
553 content_srf = *new_vfbs->surface;
554 }
555
556
557 ret = vmw_surface_gb_priv_define(crtc->dev,
558 0, /* because kernel visible only */
559 content_srf.flags,
560 content_srf.format,
561 true, /* a scanout buffer */
562 content_srf.mip_levels[0],
563 content_srf.multisample_count,
564 0,
565 display_base_size,
566 &display_srf);
567 if (unlikely(ret != 0)) {
568 DRM_ERROR("Cannot allocate a display FB.\n");
569 goto err_unref_content;
570 }
571
572 stdu->display_srf = display_srf;
573 } else {
574 new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
575 stdu->display_srf = new_vfbs->surface;
576 }
577
578
579 ret = vmw_stdu_pin_display(stdu);
580 if (unlikely(ret != 0)) {
581 stdu->display_srf = NULL;
582 goto err_unref_content;
583 }
584
585 vmw_svga_enable(dev_priv);
586
587 /*
588 * Steps to displaying a surface, assume surface is already
589 * bound:
590 * 1. define a screen target
591 * 2. bind a fb to the screen target
592 * 3. update that screen target (this is done later by
593 * vmw_kms_stdu_do_surface_dirty_or_present)
594 */
595 ret = vmw_stdu_define_st(dev_priv, stdu);
596 if (unlikely(ret != 0))
597 goto err_unpin_display_and_content;
598
599 ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
600 if (unlikely(ret != 0))
601 goto err_unpin_destroy_st;
602
603
604 connector->encoder = encoder;
605 encoder->crtc = crtc;
606
607 crtc->mode = *mode;
608 crtc->primary->fb = new_fb;
609 crtc->enabled = true;
610
611 return ret;
612
613err_unpin_destroy_st:
614 vmw_stdu_destroy_st(dev_priv, stdu);
615err_unpin_display_and_content:
616 vmw_stdu_unpin_display(stdu);
617err_unref_content:
618 stdu->content_fb = NULL;
619 return ret;
620}
621
622
623
624/**
625 * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
626 *
627 * @crtc: CRTC to attach FB to
628 * @fb: FB to attach
629 * @event: Event to be posted. This event should've been allocated
630 * using k[mz]alloc, and should've been completely initialized.
631 * @page_flip_flags: Input flags.
632 *
633 * If the STDU uses the same display and content buffers, i.e. a true flip,
634 * this function will replace the existing display buffer with the new content
635 * buffer.
636 *
637 * If the STDU uses different display and content buffers, i.e. a blit, then
638 * only the content buffer will be updated.
639 *
640 * RETURNS:
641 * 0 on success, error code on failure
642 */
643static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
644 struct drm_framebuffer *new_fb,
645 struct drm_pending_vblank_event *event,
646 uint32_t flags)
647
648{
649	struct vmw_private *dev_priv;
650 struct vmw_screen_target_display_unit *stdu;
651 int ret;
652
653 if (crtc == NULL)
654 return -EINVAL;
655
656 dev_priv = vmw_priv(crtc->dev);
657 stdu = vmw_crtc_to_stdu(crtc);
658 crtc->primary->fb = new_fb;
659 stdu->content_fb = new_fb;
660
661 if (stdu->display_srf) {
662 /*
663 * If the display surface is the same as the content surface
664 * then remove the reference
665 */
666 if (stdu->content_fb_type == SAME_AS_DISPLAY) {
667 if (stdu->defined) {
668 /* Unbind the current surface */
669 ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
670 if (unlikely(ret != 0))
671 goto err_out;
672 }
673 vmw_stdu_unpin_display(stdu);
674 stdu->display_srf = NULL;
675 }
676 }
677
678
679 if (!new_fb) {
680 /* Blanks the display */
681 (void) vmw_stdu_update_st(dev_priv, stdu);
682
683 return 0;
684 }
685
686
687 if (stdu->content_fb_type == SAME_AS_DISPLAY) {
688 stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
689 ret = vmw_stdu_pin_display(stdu);
690 if (ret) {
691 stdu->display_srf = NULL;
692 goto err_out;
693 }
694
695 /* Bind display surface */
696 ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
697 if (unlikely(ret != 0))
698 goto err_unpin_display_and_content;
699 }
700
701 /* Update display surface: after this point everything is bound */
702 ret = vmw_stdu_update_st(dev_priv, stdu);
703 if (unlikely(ret != 0))
704 return ret;
705
706 if (event) {
707 struct vmw_fence_obj *fence = NULL;
708 struct drm_file *file_priv = event->base.file_priv;
709
710 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
711 if (!fence)
712 return -ENOMEM;
713
714 ret = vmw_event_fence_action_queue(file_priv, fence,
715 &event->base,
716 &event->event.tv_sec,
717 &event->event.tv_usec,
718 true);
719 vmw_fence_obj_unreference(&fence);
720 }
721
722 return ret;
723
724err_unpin_display_and_content:
725 vmw_stdu_unpin_display(stdu);
726err_out:
727 crtc->primary->fb = NULL;
728 stdu->content_fb = NULL;
729 return ret;
730}
731
732
733/**
734 * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
735 *
736 * @dirty: The closure structure.
737 *
738 * Encodes a surface DMA command cliprect and updates the bounding box
739 * for the DMA.
740 */
741static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
742{
743 struct vmw_stdu_dirty *ddirty =
744 container_of(dirty, struct vmw_stdu_dirty, base);
745 struct vmw_stdu_dma *cmd = dirty->cmd;
746 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
747
748 blit += dirty->num_hits;
749 blit->srcx = dirty->fb_x;
750 blit->srcy = dirty->fb_y;
751 blit->x = dirty->unit_x1;
752 blit->y = dirty->unit_y1;
753 blit->d = 1;
754 blit->w = dirty->unit_x2 - dirty->unit_x1;
755 blit->h = dirty->unit_y2 - dirty->unit_y1;
756 dirty->num_hits++;
757
758 if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
759 return;
760
761 /* Destination bounding box */
762 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
763 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
764 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
765 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
766}
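
Seeding the running bounding box with S32_MAX/S32_MIN makes the first min_t()/max_t() pair collapse it onto the first cliprect; a worked example with two arbitrary clips:

    s32 left = S32_MAX, top = S32_MAX, right = S32_MIN, bottom = S32_MIN;

    /* First clip (10,10)-(20,20): box becomes (10,10)-(20,20). */
    left = min_t(s32, left, 10);    right = max_t(s32, right, 20);
    top = min_t(s32, top, 10);      bottom = max_t(s32, bottom, 20);

    /* Second clip (15,5)-(30,12): box grows to (10,5)-(30,20). */
    left = min_t(s32, left, 15);    right = max_t(s32, right, 30);
    top = min_t(s32, top, 5);       bottom = max_t(s32, bottom, 12);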
767
768/**
769 * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
770 *
771 * @dirty: The closure structure.
772 *
773 * Fills in the missing fields in a DMA command, and optionally encodes
774 * a screen target update command, depending on transfer direction.
775 */
776static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
777{
778 struct vmw_stdu_dirty *ddirty =
779 container_of(dirty, struct vmw_stdu_dirty, base);
780 struct vmw_screen_target_display_unit *stdu =
781 container_of(dirty->unit, typeof(*stdu), base);
782 struct vmw_stdu_dma *cmd = dirty->cmd;
783 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
784 SVGA3dCmdSurfaceDMASuffix *suffix =
785 (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
786 size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
787
788 if (!dirty->num_hits) {
789 vmw_fifo_commit(dirty->dev_priv, 0);
790 return;
791 }
792
793 cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
794 cmd->header.size = sizeof(cmd->body) + blit_size;
795 vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
796 cmd->body.guest.pitch = ddirty->pitch;
797 cmd->body.host.sid = stdu->display_srf->res.id;
798 cmd->body.host.face = 0;
799 cmd->body.host.mipmap = 0;
800 cmd->body.transfer = ddirty->transfer;
801 suffix->suffixSize = sizeof(*suffix);
802 suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
803
804 if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
805 blit_size += sizeof(struct vmw_stdu_update);
806
807 vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
808 ddirty->left, ddirty->right,
809 ddirty->top, ddirty->bottom);
810 }
811
812 vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
813
814 ddirty->left = ddirty->top = S32_MAX;
815 ddirty->right = ddirty->bottom = S32_MIN;
816}
817
818/**
819 * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
820 * framebuffer and the screen target system.
821 *
822 * @dev_priv: Pointer to the device private structure.
823 * @file_priv: Pointer to a struct drm_file identifying the caller. May be
824 * set to NULL, but then @user_fence_rep must also be set to NULL.
825 * @vfb: Pointer to the dma-buffer backed framebuffer.
826 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
827 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
828 * be NULL.
829 * @num_clips: Number of clip rects in @clips or @vclips.
830 * @increment: Increment to use when looping over @clips or @vclips.
831 * @to_surface: Whether to DMA to the screen target system as opposed to
832 * from the screen target system.
833 * @interruptible: Whether to perform waits interruptible if possible.
834 *
835 * If DMA-ing to the screen target system, the function will also notify
836 * the screen target system that a bounding box of the cliprects has been
837 * updated.
838 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
839 * interrupted.
840 */
841int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
842 struct drm_file *file_priv,
843 struct vmw_framebuffer *vfb,
844 struct drm_vmw_fence_rep __user *user_fence_rep,
845 struct drm_clip_rect *clips,
846 struct drm_vmw_rect *vclips,
847 uint32_t num_clips,
848 int increment,
849 bool to_surface,
850 bool interruptible)
851{
852 struct vmw_dma_buffer *buf =
853 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
854 struct vmw_stdu_dirty ddirty;
855 int ret;
856
857 ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
858 false);
859 if (ret)
860 return ret;
861
862 ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
863 SVGA3D_READ_HOST_VRAM;
864 ddirty.left = ddirty.top = S32_MAX;
865 ddirty.right = ddirty.bottom = S32_MIN;
866 ddirty.pitch = vfb->base.pitches[0];
867 ddirty.buf = buf;
868 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
869 ddirty.base.clip = vmw_stdu_dmabuf_clip;
870 ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
871 num_clips * sizeof(SVGA3dCopyBox) +
872 sizeof(SVGA3dCmdSurfaceDMASuffix);
873 if (to_surface)
874 ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
875
876 ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
877 0, 0, num_clips, increment, &ddirty.base);
878 vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
879 user_fence_rep);
880
881 return ret;
882}
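
The fifo_reserve_size set up above corresponds to the following layout of the reserved FIFO space, which vmw_stdu_dmabuf_clip() and vmw_stdu_dmabuf_fifo_commit() then fill in (an illustration, not literal code):

    /*
     * struct vmw_stdu_dma          SURFACE_DMA header and body
     * SVGA3dCopyBox[num_clips]     one copy box per cliprect
     * SVGA3dCmdSurfaceDMASuffix    pitch and max-offset suffix
     * struct vmw_stdu_update       only when to_surface, i.e. when
     *                              transfer == SVGA3D_WRITE_HOST_VRAM
     */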
883
884/**
885 * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
886 *
887 * @dirty: The closure structure.
888 *
889 * Encodes a surface copy command cliprect and updates the bounding box
890 * for the copy.
891 */
892static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
893{
894 struct vmw_stdu_dirty *sdirty =
895 container_of(dirty, struct vmw_stdu_dirty, base);
896 struct vmw_stdu_surface_copy *cmd = dirty->cmd;
897 struct vmw_screen_target_display_unit *stdu =
898 container_of(dirty->unit, typeof(*stdu), base);
899
900 if (sdirty->sid != stdu->display_srf->res.id) {
901 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
902
903 blit += dirty->num_hits;
904 blit->srcx = dirty->fb_x;
905 blit->srcy = dirty->fb_y;
906 blit->x = dirty->unit_x1;
907 blit->y = dirty->unit_y1;
908 blit->d = 1;
909 blit->w = dirty->unit_x2 - dirty->unit_x1;
910 blit->h = dirty->unit_y2 - dirty->unit_y1;
911 }
912
913 dirty->num_hits++;
914
915 /* Destination bounding box */
916 sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
917 sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
918 sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
919 sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
920}
921
922/**
923 * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
924 * copy command.
925 *
926 * @dirty: The closure structure.
927 *
928 * Fills in the missing fields in a surface copy command, and encodes a screen
929 * target update command.
930 */
931static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
932{
933 struct vmw_stdu_dirty *sdirty =
934 container_of(dirty, struct vmw_stdu_dirty, base);
935 struct vmw_screen_target_display_unit *stdu =
936 container_of(dirty->unit, typeof(*stdu), base);
937 struct vmw_stdu_surface_copy *cmd = dirty->cmd;
938 struct vmw_stdu_update *update;
939 size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
940 size_t commit_size;
941
942 if (!dirty->num_hits) {
943 vmw_fifo_commit(dirty->dev_priv, 0);
944 return;
945 }
946
947 if (sdirty->sid != stdu->display_srf->res.id) {
948 struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
949
950 cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
951 cmd->header.size = sizeof(cmd->body) + blit_size;
952 cmd->body.src.sid = sdirty->sid;
953 cmd->body.dest.sid = stdu->display_srf->res.id;
954 update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
955 commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
956 } else {
957 update = dirty->cmd;
958 commit_size = sizeof(*update);
959 }
960
961 vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
962 sdirty->right, sdirty->top, sdirty->bottom);
963
964 vmw_fifo_commit(dirty->dev_priv, commit_size);
965
966 sdirty->left = sdirty->top = S32_MAX;
967 sdirty->right = sdirty->bottom = S32_MIN;
968}
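
Only as much of the reservation as the chosen branch needs is committed; the reservation made by vmw_kms_stdu_surface_dirty() always assumes the worst case (copy plus update), so committing less is safe. In summary:

    /*
     * Copy path (blit to a separate display surface):
     *   sizeof(*cmd) + num_hits * sizeof(SVGA3dCopyBox) + sizeof(*update)
     * Update-only path (framebuffer surface is the display surface):
     *   sizeof(*update)
     */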
969
970/**
971 * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
972 *
973 * @dev_priv: Pointer to the device private structure.
974 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
975 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
976 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
977 * be NULL.
978 * @srf: Pointer to surface to blit from. If NULL, the surface attached
979 * to @framebuffer will be used.
980 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
981 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
982 * @num_clips: Number of clip rects in @clips.
983 * @inc: Increment to use when looping over @clips.
984 * @out_fence: If non-NULL, will return a ref-counted pointer to a
985 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
986 * case the device has already synchronized.
987 *
988 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
989 * interrupted.
990 */
991int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
992 struct vmw_framebuffer *framebuffer,
993 struct drm_clip_rect *clips,
994 struct drm_vmw_rect *vclips,
995 struct vmw_resource *srf,
996 s32 dest_x,
997 s32 dest_y,
998 unsigned num_clips, int inc,
999 struct vmw_fence_obj **out_fence)
1000{
1001 struct vmw_framebuffer_surface *vfbs =
1002 container_of(framebuffer, typeof(*vfbs), base);
1003 struct vmw_stdu_dirty sdirty;
1004 int ret;
1005
1006 if (!srf)
1007 srf = &vfbs->surface->res;
1008
1009 ret = vmw_kms_helper_resource_prepare(srf, true);
1010 if (ret)
1011 return ret;
1012
1013 if (vfbs->is_dmabuf_proxy) {
1014 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
1015 if (ret)
1016 goto out_finish;
1017 }
1018
1019 sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
1020 sdirty.base.clip = vmw_kms_stdu_surface_clip;
1021 sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
1022 sizeof(SVGA3dCopyBox) * num_clips +
1023 sizeof(struct vmw_stdu_update);
1024 sdirty.sid = srf->id;
1025 sdirty.left = sdirty.top = S32_MAX;
1026 sdirty.right = sdirty.bottom = S32_MIN;
1027
1028 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
1029 dest_x, dest_y, num_clips, inc,
1030 &sdirty.base);
1031out_finish:
1032 vmw_kms_helper_resource_finish(srf, out_fence);
1033
1034 return ret;
1035}
1036
1037
1038/*
1039 * Screen Target CRTC dispatch table
1040 */
1041static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1042 .save = vmw_du_crtc_save,
1043 .restore = vmw_du_crtc_restore,
1044 .cursor_set = vmw_du_crtc_cursor_set,
1045 .cursor_move = vmw_du_crtc_cursor_move,
1046 .gamma_set = vmw_du_crtc_gamma_set,
1047 .destroy = vmw_stdu_crtc_destroy,
1048 .set_config = vmw_stdu_crtc_set_config,
1049 .page_flip = vmw_stdu_crtc_page_flip,
1050};
1051
1052
1053
1054/******************************************************************************
1055 * Screen Target Display Unit Encoder Functions
1056 *****************************************************************************/
1057
1058/**
1059 * vmw_stdu_encoder_destroy - cleans up the STDU
1060 *
1061 * @encoder: used to get the containing STDU
1062 *
1063 * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
1064 * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
1065 * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
1066 * get called.
1067 */
1068static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
1069{
1070 vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
1071}
1072
1073static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
1074 .destroy = vmw_stdu_encoder_destroy,
1075};
1076
1077
1078
1079/******************************************************************************
1080 * Screen Target Display Unit Connector Functions
1081 *****************************************************************************/
1082
1083/**
1084 * vmw_stdu_connector_destroy - cleans up the STDU
1085 *
1086 * @connector: used to get the containing STDU
1087 *
1088 * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
1089 * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
1090 * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
1091 * get called.
1092 */
1093static void vmw_stdu_connector_destroy(struct drm_connector *connector)
1094{
1095 vmw_stdu_destroy(vmw_connector_to_stdu(connector));
1096}
1097
1098
1099
1100static struct drm_connector_funcs vmw_stdu_connector_funcs = {
1101 .dpms = vmw_du_connector_dpms,
1102 .save = vmw_du_connector_save,
1103 .restore = vmw_du_connector_restore,
1104 .detect = vmw_du_connector_detect,
1105 .fill_modes = vmw_du_connector_fill_modes,
1106 .set_property = vmw_du_connector_set_property,
1107 .destroy = vmw_stdu_connector_destroy,
1108};
1109
1110
1111
1112/**
1113 * vmw_stdu_init - Sets up a Screen Target Display Unit
1114 *
1115 * @dev_priv: VMW DRM device
1116 * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
1117 *
1118 * This function is called once per CRTC, and allocates one Screen Target
1119 * display unit to represent that CRTC. Since the SVGA device does not separate
1120 * out encoder and connector, they are represented as part of the STDU as well.
1121 */
1122static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
1123{
1124 struct vmw_screen_target_display_unit *stdu;
1125 struct drm_device *dev = dev_priv->dev;
1126 struct drm_connector *connector;
1127 struct drm_encoder *encoder;
1128 struct drm_crtc *crtc;
1129
1130
1131 stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
1132 if (!stdu)
1133 return -ENOMEM;
1134
1135 stdu->base.unit = unit;
1136 crtc = &stdu->base.crtc;
1137 encoder = &stdu->base.encoder;
1138 connector = &stdu->base.connector;
1139
1140 stdu->base.pref_active = (unit == 0);
1141 stdu->base.pref_width = dev_priv->initial_width;
1142 stdu->base.pref_height = dev_priv->initial_height;
1143 stdu->base.is_implicit = true;
1144
1145 drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
1146 DRM_MODE_CONNECTOR_VIRTUAL);
1147 connector->status = vmw_du_connector_detect(connector, false);
1148
1149 drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
1150 DRM_MODE_ENCODER_VIRTUAL);
1151 drm_mode_connector_attach_encoder(connector, encoder);
1152 encoder->possible_crtcs = (1 << unit);
1153 encoder->possible_clones = 0;
1154
1155 (void) drm_connector_register(connector);
1156
1157 drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
1158
1159 drm_mode_crtc_set_gamma_size(crtc, 256);
1160
1161 drm_object_attach_property(&connector->base,
1162 dev->mode_config.dirty_info_property,
1163 1);
1164
1165 return 0;
1166}
1167
1168
1169
1170/**
1171 * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
1172 *
1173 * @stdu: Screen Target Display Unit to be destroyed
1174 *
1175 * Clean up after vmw_stdu_init
1176 */
1177static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
1178{
1179 vmw_stdu_unpin_display(stdu);
1180
1181 vmw_du_cleanup(&stdu->base);
1182 kfree(stdu);
1183}
1184
1185
1186
1187/******************************************************************************
1188 * Screen Target Display KMS Functions
1189 *
1190 * These functions are called by the common KMS code in vmwgfx_kms.c
1191 *****************************************************************************/
1192
1193/**
1194 * vmw_kms_stdu_init_display - Initializes a Screen Target based display
1195 *
1196 * @dev_priv: VMW DRM device
1197 *
1198 * This function initializes a Screen Target based display device. It checks
1199 * the capability bits to make sure the underlying hardware can support
1200 * screen targets, and then creates the maximum number of CRTCs, a.k.a Display
1201 * Units, as supported by the display hardware.
1202 *
1203 * RETURNS:
1204 * 0 on success, error code otherwise
1205 */
1206int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
1207{
1208 struct drm_device *dev = dev_priv->dev;
1209 int i, ret;
1210
1211
1212 /* Do nothing if Screen Target support is turned off */
1213 if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
1214 return -ENOSYS;
1215
1216 if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
1217 return -ENOSYS;
1218
1219 ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
1220 if (unlikely(ret != 0))
1221 return ret;
1222
1223 ret = drm_mode_create_dirty_info_property(dev);
1224 if (unlikely(ret != 0))
1225 goto err_vblank_cleanup;
1226
1227 dev_priv->active_display_unit = vmw_du_screen_target;
1228
1229 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
1230 ret = vmw_stdu_init(dev_priv, i);
1231
1232 if (unlikely(ret != 0)) {
1233			DRM_ERROR("Failed to initialize STDU %d\n", i);
1234 goto err_vblank_cleanup;
1235 }
1236 }
1237
1238 DRM_INFO("Screen Target Display device initialized\n");
1239
1240 return 0;
1241
1242err_vblank_cleanup:
1243 drm_vblank_cleanup(dev);
1244 return ret;
1245}
1246
1247
1248
1249/**
1250 * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
1251 *
1252 * @dev_priv: VMW DRM device
1253 *
1254 * Frees up any resources allocated by vmw_kms_stdu_init_display
1255 *
1256 * RETURNS:
1257 * 0 on success
1258 */
1259int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
1260{
1261 struct drm_device *dev = dev_priv->dev;
1262
1263 drm_vblank_cleanup(dev);
1264
1265 return 0;
1266}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3e59da..5b8595b78429 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
27 27
28#include "vmwgfx_drv.h" 28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "vmwgfx_so.h"
31#include "vmwgfx_binding.h"
30#include <ttm/ttm_placement.h> 32#include <ttm/ttm_placement.h>
31#include "svga3d_surfacedefs.h" 33#include "device_include/svga3d_surfacedefs.h"
34
32 35
33/** 36/**
34 * struct vmw_user_surface - User-space visible surface resource 37 * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
36 * @base: The TTM base object handling user-space visibility. 39 * @base: The TTM base object handling user-space visibility.
37 * @srf: The surface metadata. 40 * @srf: The surface metadata.
38 * @size: TTM accounting size for the surface. 41 * @size: TTM accounting size for the surface.
39 * @master: master of the creating client. Used for security check. 42 * @master: master of the creating client. Used for security check.
40 */ 43 */
41struct vmw_user_surface { 44struct vmw_user_surface {
42 struct ttm_prime_object prime; 45 struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
220 cmd->header.size = cmd_len; 223 cmd->header.size = cmd_len;
221 cmd->body.sid = srf->res.id; 224 cmd->body.sid = srf->res.id;
222 cmd->body.surfaceFlags = srf->flags; 225 cmd->body.surfaceFlags = srf->flags;
223 cmd->body.format = cpu_to_le32(srf->format); 226 cmd->body.format = srf->format;
224 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 227 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
225 cmd->body.face[i].numMipLevels = srf->mip_levels[i]; 228 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
226 229
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
340 dev_priv->used_memory_size -= res->backup_size; 343 dev_priv->used_memory_size -= res->backup_size;
341 mutex_unlock(&dev_priv->cmdbuf_mutex); 344 mutex_unlock(&dev_priv->cmdbuf_mutex);
342 } 345 }
343 vmw_3d_resource_dec(dev_priv, false); 346 vmw_fifo_resource_dec(dev_priv);
344} 347}
345 348
346/** 349/**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-		(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-			vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
 		return ret;
 	}
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	 * surface validate.
 	 */
 
+	INIT_LIST_HEAD(&srf->view_list);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
 		return -EINVAL;
 	}
 
@@ -1018,17 +1023,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint32_t cmd_len, submit_len;
+	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface body;
 	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
 
 	if (likely(res->id != -1))
 		return 0;
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd_len = sizeof(cmd->body);
-	submit_len = sizeof(*cmd);
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
@@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	cmd->body.numMipLevels = srf->mip_levels[0];
-	cmd->body.multisampleCount = srf->multisample_count;
-	cmd->body.autogenFilter = srf->autogen_filter;
-	cmd->body.size.width = srf->base_size.width;
-	cmd->body.size.height = srf->base_size.height;
-	cmd->body.size.depth = srf->base_size.depth;
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
 	vmw_fifo_commit(dev_priv, submit_len);
 
 	return 0;
@@ -1068,7 +1103,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	return ret;
 }
 
@@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -1213,11 +1250,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  * the user surface define functionality.
@@ -1241,77 +1279,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
 
 	size = vmw_user_surface_size + 128;
 
-	desc = svga3dsurface_get_desc(req->format);
-	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-		DRM_ERROR("Invalid surface format for surface creation.\n");
-		return -EINVAL;
-	}
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+					 size,
+					 req->svga3d_flags,
+					 req->format,
+					 req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+					 req->mip_levels,
+					 req->multisample_count,
+					 req->array_size,
+					 req->base_size,
+					 &srf);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->svga3d_flags;
-	srf->format = req->format;
-	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
-	srf->mip_levels[0] = req->mip_levels;
-	srf->num_sizes = 1;
-	srf->sizes = NULL;
-	srf->offsets = NULL;
-	user_srf->size = size;
-	srf->base_size = req->base_size;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = req->multisample_count;
-	res->backup_size = svga3dsurface_get_serialized_size
-		(srf->format, srf->base_size, srf->mip_levels[0],
-		 srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile = NULL;
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
 	if (drm_is_primary_client(file_priv))
 		user_srf->master = drm_master_get(file_priv->master);
 
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		return ret;
+
+	res = &user_srf->srf.res;
+
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup);
-	} else if (req->drm_surface_flags &
-		   drm_vmw_surface_flag_create_buffer)
+		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+		    res->backup_size) {
+			DRM_ERROR("Surface backup buffer is too small.\n");
+			vmw_dmabuf_unreference(&res->backup);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
 					    res->backup_size,
 					    req->drm_surface_flags &
@@ -1324,7 +1336,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	tmp = vmw_resource_reference(&srf->res);
+	tmp = vmw_resource_reference(res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->drm_surface_flags &
 				    drm_vmw_surface_flag_shareable,
@@ -1337,7 +1349,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle      = user_srf->prime.base.hash.key;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1352,10 +1364,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return 0;
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
@@ -1415,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 	rep->creq.drm_surface_flags = 0;
 	rep->creq.multisample_count = srf->multisample_count;
 	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.array_size = srf->array_size;
 	rep->creq.buffer_handle = backup_handle;
 	rep->creq.base_size = srf->base_size;
 	rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1438,137 @@ out_bad_resource:
 
 	return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev: Pointer to a struct drm_device
+ * @user_accounting_size: Used to track user-space memory usage, set
+ *                        to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for a scanout buffer
+ * @num_mip_levels: number of MIP levels
+ * @multisample_count: multisample count
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated user_srf. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for non-GL3 hosts. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags = svga3d_flags;
+	srf->format = format;
+	srf->scanout = for_scanout;
+	srf->mip_levels[0] = num_mip_levels;
+	srf->num_sizes = 1;
+	srf->sizes = NULL;
+	srf->offsets = NULL;
+	srf->base_size = size;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
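
The two pieces of logic added above fit together: vmw_surface_gb_priv_define() sizes the backup store per layer (the array size for DX array surfaces, six faces for cube maps, one layer otherwise), and vmw_gb_surface_create() must then emit the DEFINE_GB_SURFACE_V2 command whenever the array size is non-zero, since only the v2 body carries arraySize. A minimal stand-alone sketch of that selection logic follows; the ex_* names and placeholder constants are illustrative only, not part of the patch, though the real driver applies SVGA3D_SURFACE_CUBEMAP and SVGA3D_MAX_SURFACE_FACES the same way.

#include <stdint.h>
#include <stdbool.h>

#define EX_SURFACE_CUBEMAP   (1u << 0)  /* stands in for SVGA3D_SURFACE_CUBEMAP */
#define EX_MAX_SURFACE_FACES 6          /* stands in for SVGA3D_MAX_SURFACE_FACES */

/* Pick the layer count the way vmw_surface_gb_priv_define() does. */
static uint32_t ex_num_layers(uint32_t array_size, uint32_t flags)
{
	if (array_size)                  /* DX array surface: one layer per element */
		return array_size;
	if (flags & EX_SURFACE_CUBEMAP)  /* legacy cube map: six faces */
		return EX_MAX_SURFACE_FACES;
	return 1;                        /* plain 2D/3D surface */
}

/* Array surfaces need the _v2 define command; everything else keeps v1. */
static bool ex_needs_define_v2(uint32_t array_size)
{
	return array_size > 0;
}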
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..e771091d2cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 020afa343dff..8b5ce7c5d9bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
 /*@{*/
 
 	/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
 extern long drm_ioctl(struct file *filp,
 		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
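
With drm_ioctl_permit() now exported from the DRM core, a driver can apply the same DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY policy the core dispatcher enforces to commands it validates itself, which is what vmwgfx needs for its extended execbuf path. A hedged sketch of such a call site, assuming a hypothetical driver-private handler my_driver_priv_ioctl(); drm_ioctl_permit(), DRM_AUTH and struct drm_file are the real interfaces declared in drmP.h.

#include <drm/drmP.h>

static int my_driver_priv_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	int ret;

	/* Enforce the same DRM_AUTH policy the core dispatcher would. */
	ret = drm_ioctl_permit(DRM_AUTH, file_priv);
	if (ret)
		return ret;

	/* ... driver-specific work on *data ... */
	return 0;
}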
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe38e..05b204954d16 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE     8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
+#define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have proper bind flags setup.
  *
  * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
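
On the uapi side the extension is backwards compatible: array_size replaces a pad field that old userspace already zeroes, and the execbuf version bump to 2 adds context_handle so DX command streams can name the context they target. A userspace sketch of the extended surface-create request follows, assuming libdrm's drmCommandWriteRead() and an open vmwgfx render node; ex_create_texture_array() and the zeroed flag choices are illustrative, while the union, ioctl number and SVGA3D_INVALID_ID come from vmwgfx_drm.h.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int ex_create_texture_array(int fd, uint32_t format,
				   uint32_t width, uint32_t height,
				   uint32_t layers, uint32_t *handle)
{
	union drm_vmw_gb_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.svga3d_flags = 0;      /* bind flags must suit a non-zero array_size */
	arg.req.format = format;       /* an SVGA3d format value */
	arg.req.mip_levels = 1;
	arg.req.buffer_handle = SVGA3D_INVALID_ID;  /* let the kernel back it */
	arg.req.array_size = layers;   /* non-zero requires a DX-capable host */
	arg.req.base_size.width = width;
	arg.req.base_size.height = height;
	arg.req.base_size.depth = 1;
	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;

	ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
				  &arg, sizeof(arg));
	if (ret == 0)
		*handle = arg.rep.handle;
	return ret;
}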