aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2014-06-09 06:39:22 -0400
committerThomas Hellstrom <thellstrom@vmware.com>2014-07-04 06:12:09 -0400
commit18e4a4669c5023eb1157f2a3f1bf6ca2b8535572 (patch)
tree3d3e32d5eee38490a1660f8dfc4f62c7b8e3fd01
parentbc1dfff04a5d4064ba0db1fab13f84ab4f333d2b (diff)
drm/vmwgfx: Fix compat shader namespace
Contrary to the host-backed shader interface that has a per-context name-space for shaders, the compat shader namespace was per client (or rather, per file). Fix this so that the compat shader namespace is per context, and at the same time, make command buffer managed context resource management generic. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c341
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c227
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c396
7 files changed, 673 insertions, 413 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o 9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
10 vmwgfx_cmdbuf_res.o \
10 11
11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 12obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
1/**************************************************************************
2 *
3 * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29
30#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
31
/**
 * enum vmw_cmdbuf_res_state - Staging state of a command buffer managed
 * resource entry.
 *
 * @VMW_CMDBUF_RES_COMMITED: Entry is committed and lives on the manager's
 * list of committed resources. (Identifier spelling is kept as-is for
 * compatibility with existing users of this enum.)
 * @VMW_CMDBUF_RES_ADD: Entry is staged for addition on a caller's list.
 * @VMW_CMDBUF_RES_DEL: Entry is staged for removal on a caller's list.
 */
enum vmw_cmdbuf_res_state {
	VMW_CMDBUF_RES_COMMITED,
	VMW_CMDBUF_RES_ADD,
	VMW_CMDBUF_RES_DEL
};
37
38/**
39 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
40 *
41 * @res: Refcounted pointer to a struct vmw_resource.
42 * @hash: Hash entry for the manager hash table.
43 * @head: List head used either by the staging list or the manager list
44 * of commited resources.
45 * @state: Staging state of this resource entry.
46 * @man: Pointer to a resource manager for this entry.
47 */
48struct vmw_cmdbuf_res {
49 struct vmw_resource *res;
50 struct drm_hash_item hash;
51 struct list_head head;
52 enum vmw_cmdbuf_res_state state;
53 struct vmw_cmdbuf_res_manager *man;
54};
55
56/**
57 * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
58 *
59 * @resources: Hash table containing staged and commited command buffer
60 * resources
61 * @list: List of commited command buffer resources.
62 * @dev_priv: Pointer to a device private structure.
63 *
64 * @resources and @list are protected by the cmdbuf mutex for now.
65 */
66struct vmw_cmdbuf_res_manager {
67 struct drm_open_hash resources;
68 struct list_head list;
69 struct vmw_private *dev_priv;
70};
71
72
73/**
74 * vmw_cmdbuf_res_lookup - Look up a command buffer resource
75 *
76 * @man: Pointer to the command buffer resource manager
77 * @resource_type: The resource type, that combined with the user key
78 * identifies the resource.
79 * @user_key: The user key.
80 *
81 * Returns a valid refcounted struct vmw_resource pointer on success,
82 * an error pointer on failure.
83 */
84struct vmw_resource *
85vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
86 enum vmw_cmdbuf_res_type res_type,
87 u32 user_key)
88{
89 struct drm_hash_item *hash;
90 int ret;
91 unsigned long key = user_key | (res_type << 24);
92
93 ret = drm_ht_find_item(&man->resources, key, &hash);
94 if (unlikely(ret != 0))
95 return ERR_PTR(ret);
96
97 return vmw_resource_reference
98 (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
99}
100
101/**
102 * vmw_cmdbuf_res_free - Free a command buffer resource.
103 *
104 * @man: Pointer to the command buffer resource manager
105 * @entry: Pointer to a struct vmw_cmdbuf_res.
106 *
107 * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
108 * struct vmw_resource.
109 */
110static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
111 struct vmw_cmdbuf_res *entry)
112{
113 list_del(&entry->head);
114 WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
115 vmw_resource_unreference(&entry->res);
116 kfree(entry);
117}
118
119/**
120 * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
121 *
122 * @list: Caller's list of command buffer resource actions.
123 *
124 * This function commits a list of command buffer resource
125 * additions or removals.
126 * It is typically called when the execbuf ioctl call triggering these
127 * actions has commited the fifo contents to the device.
128 */
129void vmw_cmdbuf_res_commit(struct list_head *list)
130{
131 struct vmw_cmdbuf_res *entry, *next;
132
133 list_for_each_entry_safe(entry, next, list, head) {
134 list_del(&entry->head);
135 switch (entry->state) {
136 case VMW_CMDBUF_RES_ADD:
137 entry->state = VMW_CMDBUF_RES_COMMITED;
138 list_add_tail(&entry->head, &entry->man->list);
139 break;
140 case VMW_CMDBUF_RES_DEL:
141 vmw_resource_unreference(&entry->res);
142 kfree(entry);
143 break;
144 default:
145 BUG();
146 break;
147 }
148 }
149}
150
151/**
152 * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
153 *
154 * @man: Pointer to the command buffer resource manager
155 * @list: Caller's list of command buffer resource action
156 *
157 * This function reverts a list of command buffer resource
158 * additions or removals.
159 * It is typically called when the execbuf ioctl call triggering these
160 * actions failed for some reason, and the command stream was never
161 * submitted.
162 */
163void vmw_cmdbuf_res_revert(struct list_head *list)
164{
165 struct vmw_cmdbuf_res *entry, *next;
166 int ret;
167
168 list_for_each_entry_safe(entry, next, list, head) {
169 switch (entry->state) {
170 case VMW_CMDBUF_RES_ADD:
171 vmw_cmdbuf_res_free(entry->man, entry);
172 break;
173 case VMW_CMDBUF_RES_DEL:
174 ret = drm_ht_insert_item(&entry->man->resources,
175 &entry->hash);
176 list_del(&entry->head);
177 list_add_tail(&entry->head, &entry->man->list);
178 entry->state = VMW_CMDBUF_RES_COMMITED;
179 break;
180 default:
181 BUG();
182 break;
183 }
184 }
185}
186
187/**
188 * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
189 *
190 * @man: Pointer to the command buffer resource manager.
191 * @res_type: The resource type.
192 * @user_key: The user-space id of the resource.
193 * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
194 * @list: The staging list.
195 *
196 * This function allocates a struct vmw_cmdbuf_res entry and adds the
197 * resource to the hash table of the manager identified by @man. The
198 * entry is then put on the staging list identified by @list.
199 */
200int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
201 enum vmw_cmdbuf_res_type res_type,
202 u32 user_key,
203 struct vmw_resource *res,
204 struct list_head *list)
205{
206 struct vmw_cmdbuf_res *cres;
207 int ret;
208
209 cres = kzalloc(sizeof(*cres), GFP_KERNEL);
210 if (unlikely(cres == NULL))
211 return -ENOMEM;
212
213 cres->hash.key = user_key | (res_type << 24);
214 ret = drm_ht_insert_item(&man->resources, &cres->hash);
215 if (unlikely(ret != 0))
216 goto out_invalid_key;
217
218 cres->state = VMW_CMDBUF_RES_ADD;
219 cres->res = vmw_resource_reference(res);
220 cres->man = man;
221 list_add_tail(&cres->head, list);
222
223out_invalid_key:
224 return ret;
225}
226
227/**
228 * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
229 *
230 * @man: Pointer to the command buffer resource manager.
231 * @res_type: The resource type.
232 * @user_key: The user-space id of the resource.
233 * @list: The staging list.
234 *
235 * This function looks up the struct vmw_cmdbuf_res entry from the manager
236 * hash table and, if it exists, removes it. Depending on its current staging
237 * state it then either removes the entry from the staging list or adds it
238 * to it with a staging state of removal.
239 */
240int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
241 enum vmw_cmdbuf_res_type res_type,
242 u32 user_key,
243 struct list_head *list)
244{
245 struct vmw_cmdbuf_res *entry;
246 struct drm_hash_item *hash;
247 int ret;
248
249 ret = drm_ht_find_item(&man->resources, user_key, &hash);
250 if (likely(ret != 0))
251 return -EINVAL;
252
253 entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
254
255 switch (entry->state) {
256 case VMW_CMDBUF_RES_ADD:
257 vmw_cmdbuf_res_free(man, entry);
258 break;
259 case VMW_CMDBUF_RES_COMMITED:
260 (void) drm_ht_remove_item(&man->resources, &entry->hash);
261 list_del(&entry->head);
262 entry->state = VMW_CMDBUF_RES_DEL;
263 list_add_tail(&entry->head, list);
264 break;
265 default:
266 BUG();
267 break;
268 }
269
270 return 0;
271}
272
273/**
274 * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
275 * manager.
276 *
277 * @dev_priv: Pointer to a struct vmw_private
278 *
279 * Allocates and initializes a command buffer managed resource manager. Returns
280 * an error pointer on failure.
281 */
282struct vmw_cmdbuf_res_manager *
283vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
284{
285 struct vmw_cmdbuf_res_manager *man;
286 int ret;
287
288 man = kzalloc(sizeof(*man), GFP_KERNEL);
289 if (man == NULL)
290 return ERR_PTR(-ENOMEM);
291
292 man->dev_priv = dev_priv;
293 INIT_LIST_HEAD(&man->list);
294 ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
295 if (ret == 0)
296 return man;
297
298 kfree(man);
299 return ERR_PTR(ret);
300}
301
302/**
303 * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
304 * manager.
305 *
306 * @man: Pointer to the manager to destroy.
307 *
308 * This function destroys a command buffer managed resource manager and
309 * unreferences / frees all command buffer managed resources and -entries
310 * associated with it.
311 */
312void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
313{
314 struct vmw_cmdbuf_res *entry, *next;
315
316 list_for_each_entry_safe(entry, next, &man->list, head)
317 vmw_cmdbuf_res_free(man, entry);
318
319 kfree(man);
320}
321
322/**
323 *
324 * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
325 * resource manager
326 *
327 * Returns the approximate allocation size of a command buffer managed
328 * resource manager.
329 */
330size_t vmw_cmdbuf_res_man_size(void)
331{
332 static size_t res_man_size;
333
334 if (unlikely(res_man_size == 0))
335 res_man_size =
336 ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
337 ttm_round_pot(sizeof(struct hlist_head) <<
338 VMW_CMDBUF_RES_MAN_HT_ORDER);
339
340 return res_man_size;
341}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dcd9eae..5ac92874404d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
33 struct ttm_base_object base; 33 struct ttm_base_object base;
34 struct vmw_resource res; 34 struct vmw_resource res;
35 struct vmw_ctx_binding_state cbs; 35 struct vmw_ctx_binding_state cbs;
36 struct vmw_cmdbuf_res_manager *man;
36}; 37};
37 38
38 39
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
103 104
104static void vmw_hw_context_destroy(struct vmw_resource *res) 105static void vmw_hw_context_destroy(struct vmw_resource *res)
105{ 106{
106 107 struct vmw_user_context *uctx =
108 container_of(res, struct vmw_user_context, res);
107 struct vmw_private *dev_priv = res->dev_priv; 109 struct vmw_private *dev_priv = res->dev_priv;
108 struct { 110 struct {
109 SVGA3dCmdHeader header; 111 SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
113 115
114 if (res->func->destroy == vmw_gb_context_destroy) { 116 if (res->func->destroy == vmw_gb_context_destroy) {
115 mutex_lock(&dev_priv->cmdbuf_mutex); 117 mutex_lock(&dev_priv->cmdbuf_mutex);
118 vmw_cmdbuf_res_man_destroy(uctx->man);
116 mutex_lock(&dev_priv->binding_mutex); 119 mutex_lock(&dev_priv->binding_mutex);
117 (void) vmw_context_binding_state_kill 120 (void) vmw_context_binding_state_kill(&uctx->cbs);
118 (&container_of(res, struct vmw_user_context, res)->cbs);
119 (void) vmw_gb_context_destroy(res); 121 (void) vmw_gb_context_destroy(res);
120 mutex_unlock(&dev_priv->binding_mutex); 122 mutex_unlock(&dev_priv->binding_mutex);
121 if (dev_priv->pinned_bo != NULL && 123 if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
152 ret = vmw_resource_init(dev_priv, res, true, 154 ret = vmw_resource_init(dev_priv, res, true,
153 res_free, &vmw_gb_context_func); 155 res_free, &vmw_gb_context_func);
154 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 156 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
157 if (unlikely(ret != 0))
158 goto out_err;
155 159
156 if (unlikely(ret != 0)) { 160 if (dev_priv->has_mob) {
157 if (res_free) 161 uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
158 res_free(res); 162 if (unlikely(IS_ERR(uctx->man))) {
159 else 163 ret = PTR_ERR(uctx->man);
160 kfree(res); 164 uctx->man = NULL;
161 return ret; 165 goto out_err;
166 }
162 } 167 }
163 168
164 memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 169 memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
166 171
167 vmw_resource_activate(res, vmw_hw_context_destroy); 172 vmw_resource_activate(res, vmw_hw_context_destroy);
168 return 0; 173 return 0;
174
175out_err:
176 if (res_free)
177 res_free(res);
178 else
179 kfree(res);
180 return ret;
169} 181}
170 182
171static int vmw_context_init(struct vmw_private *dev_priv, 183static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
471 */ 483 */
472 484
473 if (unlikely(vmw_user_context_size == 0)) 485 if (unlikely(vmw_user_context_size == 0))
474 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; 486 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
487 ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
475 488
476 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 489 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
477 if (unlikely(ret != 0)) 490 if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
901{ 914{
902 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); 915 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
903} 916}
917
918struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
919{
920 return container_of(ctx, struct vmw_user_context, res)->man;
921}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62bab378..f31a75494e07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
946 drm_master_put(&vmw_fp->locked_master); 946 drm_master_put(&vmw_fp->locked_master);
947 } 947 }
948 948
949 vmw_compat_shader_man_destroy(vmw_fp->shman);
950 ttm_object_file_release(&vmw_fp->tfile); 949 ttm_object_file_release(&vmw_fp->tfile);
951 kfree(vmw_fp); 950 kfree(vmw_fp);
952} 951}
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
966 if (unlikely(vmw_fp->tfile == NULL)) 965 if (unlikely(vmw_fp->tfile == NULL))
967 goto out_no_tfile; 966 goto out_no_tfile;
968 967
969 vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
970 if (IS_ERR(vmw_fp->shman))
971 goto out_no_shman;
972
973 file_priv->driver_priv = vmw_fp; 968 file_priv->driver_priv = vmw_fp;
974 969
975 return 0; 970 return 0;
976 971
977out_no_shman:
978 ttm_object_file_release(&vmw_fp->tfile);
979out_no_tfile: 972out_no_tfile:
980 kfree(vmw_fp); 973 kfree(vmw_fp);
981 return ret; 974 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a887ae2..c1811750cc8d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
40#include <drm/ttm/ttm_module.h> 40#include <drm/ttm/ttm_module.h>
41#include "vmwgfx_fence.h" 41#include "vmwgfx_fence.h"
42 42
43#define VMWGFX_DRIVER_DATE "20140325" 43#define VMWGFX_DRIVER_DATE "20140704"
44#define VMWGFX_DRIVER_MAJOR 2 44#define VMWGFX_DRIVER_MAJOR 2
45#define VMWGFX_DRIVER_MINOR 6 45#define VMWGFX_DRIVER_MINOR 6
46#define VMWGFX_DRIVER_PATCHLEVEL 0 46#define VMWGFX_DRIVER_PATCHLEVEL 1
47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49#define VMWGFX_MAX_RELOCATIONS 2048 49#define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
75#define VMW_RES_FENCE ttm_driver_type3 75#define VMW_RES_FENCE ttm_driver_type3
76#define VMW_RES_SHADER ttm_driver_type4 76#define VMW_RES_SHADER ttm_driver_type4
77 77
78struct vmw_compat_shader_manager;
79
80struct vmw_fpriv { 78struct vmw_fpriv {
81 struct drm_master *locked_master; 79 struct drm_master *locked_master;
82 struct ttm_object_file *tfile; 80 struct ttm_object_file *tfile;
83 struct list_head fence_events; 81 struct list_head fence_events;
84 bool gb_aware; 82 bool gb_aware;
85 struct vmw_compat_shader_manager *shman;
86}; 83};
87 84
88struct vmw_dma_buffer { 85struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
124 void (*hw_destroy) (struct vmw_resource *res); 121 void (*hw_destroy) (struct vmw_resource *res);
125}; 122};
126 123
124
125/*
126 * Resources that are managed using ioctls.
127 */
127enum vmw_res_type { 128enum vmw_res_type {
128 vmw_res_context, 129 vmw_res_context,
129 vmw_res_surface, 130 vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
132 vmw_res_max 133 vmw_res_max
133}; 134};
134 135
136/*
137 * Resources that are managed using command streams.
138 */
139enum vmw_cmdbuf_res_type {
140 vmw_cmdbuf_res_compat_shader
141};
142
143struct vmw_cmdbuf_res_manager;
144
135struct vmw_cursor_snooper { 145struct vmw_cursor_snooper {
136 struct drm_crtc *crtc; 146 struct drm_crtc *crtc;
137 size_t age; 147 size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
341 bool needs_post_query_barrier; 351 bool needs_post_query_barrier;
342 struct vmw_resource *error_resource; 352 struct vmw_resource *error_resource;
343 struct vmw_ctx_binding_state staged_bindings; 353 struct vmw_ctx_binding_state staged_bindings;
344 struct list_head staged_shaders; 354 struct list_head staged_cmd_res;
345}; 355};
346 356
347struct vmw_legacy_display; 357struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
974extern void vmw_context_binding_res_list_scrub(struct list_head *head); 984extern void vmw_context_binding_res_list_scrub(struct list_head *head);
975extern int vmw_context_rebind_all(struct vmw_resource *ctx); 985extern int vmw_context_rebind_all(struct vmw_resource *ctx);
976extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 986extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
977 987extern struct vmw_cmdbuf_res_manager *
988vmw_context_res_man(struct vmw_resource *ctx);
978/* 989/*
979 * Surface management - vmwgfx_surface.c 990 * Surface management - vmwgfx_surface.c
980 */ 991 */
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1008 struct drm_file *file_priv); 1019 struct drm_file *file_priv);
1009extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 1020extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1010 struct drm_file *file_priv); 1021 struct drm_file *file_priv);
1011extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, 1022extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1012 SVGA3dShaderType shader_type, 1023 struct vmw_cmdbuf_res_manager *man,
1013 u32 *user_key);
1014extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
1015 struct list_head *list);
1016extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
1017 struct list_head *list);
1018extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
1019 u32 user_key,
1020 SVGA3dShaderType shader_type,
1021 struct list_head *list);
1022extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
1023 u32 user_key, const void *bytecode, 1024 u32 user_key, const void *bytecode,
1024 SVGA3dShaderType shader_type, 1025 SVGA3dShaderType shader_type,
1025 size_t size, 1026 size_t size,
1026 struct ttm_object_file *tfile,
1027 struct list_head *list); 1027 struct list_head *list);
1028extern struct vmw_compat_shader_manager * 1028extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
1029vmw_compat_shader_man_create(struct vmw_private *dev_priv); 1029 u32 user_key, SVGA3dShaderType shader_type,
1030extern void 1030 struct list_head *list);
1031vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); 1031extern struct vmw_resource *
1032vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1033 u32 user_key, SVGA3dShaderType shader_type);
1034
1035/*
1036 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
1037 */
1038
1039extern struct vmw_cmdbuf_res_manager *
1040vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
1041extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1042extern size_t vmw_cmdbuf_res_man_size(void);
1043extern struct vmw_resource *
1044vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
1045 enum vmw_cmdbuf_res_type res_type,
1046 u32 user_key);
1047extern void vmw_cmdbuf_res_revert(struct list_head *list);
1048extern void vmw_cmdbuf_res_commit(struct list_head *list);
1049extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1050 enum vmw_cmdbuf_res_type res_type,
1051 u32 user_key,
1052 struct vmw_resource *res,
1053 struct list_head *list);
1054extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1055 enum vmw_cmdbuf_res_type res_type,
1056 u32 user_key,
1057 struct list_head *list);
1032 1058
1033 1059
1034/** 1060/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
422 return 0; 422 return 0;
423} 423}
424 424
425
426/**
427 * vmw_cmd_res_reloc_add - Add a resource to a software context's
428 * relocation- and validation lists.
429 *
430 * @dev_priv: Pointer to a struct vmw_private identifying the device.
431 * @sw_context: Pointer to the software context.
432 * @res_type: Resource type.
433 * @id_loc: Pointer to where the id that needs translation is located.
434 * @res: Valid pointer to a struct vmw_resource.
435 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
436 * used for this resource is returned here.
437 */
438static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
439 struct vmw_sw_context *sw_context,
440 enum vmw_res_type res_type,
441 uint32_t *id_loc,
442 struct vmw_resource *res,
443 struct vmw_resource_val_node **p_val)
444{
445 int ret;
446 struct vmw_resource_val_node *node;
447
448 *p_val = NULL;
449 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
450 res,
451 id_loc - sw_context->buf_start);
452 if (unlikely(ret != 0))
453 goto out_err;
454
455 ret = vmw_resource_val_add(sw_context, res, &node);
456 if (unlikely(ret != 0))
457 goto out_err;
458
459 if (res_type == vmw_res_context && dev_priv->has_mob &&
460 node->first_usage) {
461
462 /*
463 * Put contexts first on the list to be able to exit
464 * list traversal for contexts early.
465 */
466 list_del(&node->head);
467 list_add(&node->head, &sw_context->resource_list);
468
469 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
470 if (unlikely(ret != 0))
471 goto out_err;
472 node->staged_bindings =
473 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
474 if (node->staged_bindings == NULL) {
475 DRM_ERROR("Failed to allocate context binding "
476 "information.\n");
477 goto out_err;
478 }
479 INIT_LIST_HEAD(&node->staged_bindings->list);
480 }
481
482 if (p_val)
483 *p_val = node;
484
485out_err:
486 return ret;
487}
488
489
425/** 490/**
426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it 491 * vmw_cmd_res_check - Check that a resource is present and if so, put it
427 * on the resource validate list unless it's already there. 492 * on the resource validate list unless it's already there.
428 * 493 *
429 * @dev_priv: Pointer to a device private structure. 494 * @dev_priv: Pointer to a device private structure.
430 * @sw_context: Pointer to the software context. 495 * @sw_context: Pointer to the software context.
431 * @res_type: Resource type. 496 * @res_type: Resource type.
432 * @converter: User-space visisble type specific information. 497 * @converter: User-space visisble type specific information.
433 * @id: user-space resource id handle.
434 * @id_loc: Pointer to the location in the command buffer currently being 498 * @id_loc: Pointer to the location in the command buffer currently being
435 * parsed from where the user-space resource id handle is located. 499 * parsed from where the user-space resource id handle is located.
436 * @p_val: Pointer to pointer to resource validalidation node. Populated 500 * @p_val: Pointer to pointer to resource validalidation node. Populated
437 * on exit. 501 * on exit.
438 */ 502 */
439static int 503static int
440vmw_cmd_compat_res_check(struct vmw_private *dev_priv, 504vmw_cmd_res_check(struct vmw_private *dev_priv,
441 struct vmw_sw_context *sw_context, 505 struct vmw_sw_context *sw_context,
442 enum vmw_res_type res_type, 506 enum vmw_res_type res_type,
443 const struct vmw_user_resource_conv *converter, 507 const struct vmw_user_resource_conv *converter,
444 uint32_t id, 508 uint32_t *id_loc,
445 uint32_t *id_loc, 509 struct vmw_resource_val_node **p_val)
446 struct vmw_resource_val_node **p_val)
447{ 510{
448 struct vmw_res_cache_entry *rcache = 511 struct vmw_res_cache_entry *rcache =
449 &sw_context->res_cache[res_type]; 512 &sw_context->res_cache[res_type];
@@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
451 struct vmw_resource_val_node *node; 514 struct vmw_resource_val_node *node;
452 int ret; 515 int ret;
453 516
454 if (id == SVGA3D_INVALID_ID) { 517 if (*id_loc == SVGA3D_INVALID_ID) {
455 if (p_val) 518 if (p_val)
456 *p_val = NULL; 519 *p_val = NULL;
457 if (res_type == vmw_res_context) { 520 if (res_type == vmw_res_context) {
@@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
466 * resource 529 * resource
467 */ 530 */
468 531
469 if (likely(rcache->valid && id == rcache->handle)) { 532 if (likely(rcache->valid && *id_loc == rcache->handle)) {
470 const struct vmw_resource *res = rcache->res; 533 const struct vmw_resource *res = rcache->res;
471 534
472 rcache->node->first_usage = false; 535 rcache->node->first_usage = false;
@@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
480 543
481 ret = vmw_user_resource_lookup_handle(dev_priv, 544 ret = vmw_user_resource_lookup_handle(dev_priv,
482 sw_context->fp->tfile, 545 sw_context->fp->tfile,
483 id, 546 *id_loc,
484 converter, 547 converter,
485 &res); 548 &res);
486 if (unlikely(ret != 0)) { 549 if (unlikely(ret != 0)) {
487 DRM_ERROR("Could not find or use resource 0x%08x.\n", 550 DRM_ERROR("Could not find or use resource 0x%08x.\n",
488 (unsigned) id); 551 (unsigned) *id_loc);
489 dump_stack(); 552 dump_stack();
490 return ret; 553 return ret;
491 } 554 }
492 555
493 rcache->valid = true; 556 rcache->valid = true;
494 rcache->res = res; 557 rcache->res = res;
495 rcache->handle = id; 558 rcache->handle = *id_loc;
496
497 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
498 res,
499 id_loc - sw_context->buf_start);
500 if (unlikely(ret != 0))
501 goto out_no_reloc;
502 559
503 ret = vmw_resource_val_add(sw_context, res, &node); 560 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
561 res, &node);
504 if (unlikely(ret != 0)) 562 if (unlikely(ret != 0))
505 goto out_no_reloc; 563 goto out_no_reloc;
506 564
507 rcache->node = node; 565 rcache->node = node;
508 if (p_val) 566 if (p_val)
509 *p_val = node; 567 *p_val = node;
510
511 if (dev_priv->has_mob && node->first_usage &&
512 res_type == vmw_res_context) {
513 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
514 if (unlikely(ret != 0))
515 goto out_no_reloc;
516 node->staged_bindings =
517 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
518 if (node->staged_bindings == NULL) {
519 DRM_ERROR("Failed to allocate context binding "
520 "information.\n");
521 goto out_no_reloc;
522 }
523 INIT_LIST_HEAD(&node->staged_bindings->list);
524 }
525
526 vmw_resource_unreference(&res); 568 vmw_resource_unreference(&res);
527 return 0; 569 return 0;
528 570
@@ -534,31 +576,6 @@ out_no_reloc:
534} 576}
535 577
536/** 578/**
537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
538 * on the resource validate list unless it's already there.
539 *
540 * @dev_priv: Pointer to a device private structure.
541 * @sw_context: Pointer to the software context.
542 * @res_type: Resource type.
543 * @converter: User-space visisble type specific information.
544 * @id_loc: Pointer to the location in the command buffer currently being
545 * parsed from where the user-space resource id handle is located.
546 * @p_val: Pointer to pointer to resource validalidation node. Populated
547 * on exit.
548 */
549static int
550vmw_cmd_res_check(struct vmw_private *dev_priv,
551 struct vmw_sw_context *sw_context,
552 enum vmw_res_type res_type,
553 const struct vmw_user_resource_conv *converter,
554 uint32_t *id_loc,
555 struct vmw_resource_val_node **p_val)
556{
557 return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
558 converter, *id_loc, id_loc, p_val);
559}
560
561/**
562 * vmw_rebind_contexts - Rebind all resources previously bound to 579 * vmw_rebind_contexts - Rebind all resources previously bound to
563 * referenced contexts. 580 * referenced contexts.
564 * 581 *
@@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
572 int ret; 589 int ret;
573 590
574 list_for_each_entry(val, &sw_context->resource_list, head) { 591 list_for_each_entry(val, &sw_context->resource_list, head) {
575 if (likely(!val->staged_bindings)) 592 if (unlikely(!val->staged_bindings))
576 continue; 593 break;
577 594
578 ret = vmw_context_rebind_all(val->res); 595 ret = vmw_context_rebind_all(val->res);
579 if (unlikely(ret != 0)) { 596 if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1626 } *cmd; 1643 } *cmd;
1627 int ret; 1644 int ret;
1628 size_t size; 1645 size_t size;
1646 struct vmw_resource_val_node *val;
1629 1647
1630 cmd = container_of(header, struct vmw_shader_define_cmd, 1648 cmd = container_of(header, struct vmw_shader_define_cmd,
1631 header); 1649 header);
1632 1650
1633 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1651 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634 user_context_converter, &cmd->body.cid, 1652 user_context_converter, &cmd->body.cid,
1635 NULL); 1653 &val);
1636 if (unlikely(ret != 0)) 1654 if (unlikely(ret != 0))
1637 return ret; 1655 return ret;
1638 1656
@@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1640 return 0; 1658 return 0;
1641 1659
1642 size = cmd->header.size - sizeof(cmd->body); 1660 size = cmd->header.size - sizeof(cmd->body);
1643 ret = vmw_compat_shader_add(sw_context->fp->shman, 1661 ret = vmw_compat_shader_add(dev_priv,
1662 vmw_context_res_man(val->res),
1644 cmd->body.shid, cmd + 1, 1663 cmd->body.shid, cmd + 1,
1645 cmd->body.type, size, 1664 cmd->body.type, size,
1646 sw_context->fp->tfile, 1665 &sw_context->staged_cmd_res);
1647 &sw_context->staged_shaders);
1648 if (unlikely(ret != 0)) 1666 if (unlikely(ret != 0))
1649 return ret; 1667 return ret;
1650 1668
@@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1672 SVGA3dCmdDestroyShader body; 1690 SVGA3dCmdDestroyShader body;
1673 } *cmd; 1691 } *cmd;
1674 int ret; 1692 int ret;
1693 struct vmw_resource_val_node *val;
1675 1694
1676 cmd = container_of(header, struct vmw_shader_destroy_cmd, 1695 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1677 header); 1696 header);
1678 1697
1679 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1698 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1680 user_context_converter, &cmd->body.cid, 1699 user_context_converter, &cmd->body.cid,
1681 NULL); 1700 &val);
1682 if (unlikely(ret != 0)) 1701 if (unlikely(ret != 0))
1683 return ret; 1702 return ret;
1684 1703
1685 if (unlikely(!dev_priv->has_mob)) 1704 if (unlikely(!dev_priv->has_mob))
1686 return 0; 1705 return 0;
1687 1706
1688 ret = vmw_compat_shader_remove(sw_context->fp->shman, 1707 ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1689 cmd->body.shid, 1708 cmd->body.shid,
1690 cmd->body.type, 1709 cmd->body.type,
1691 &sw_context->staged_shaders); 1710 &sw_context->staged_cmd_res);
1692 if (unlikely(ret != 0)) 1711 if (unlikely(ret != 0))
1693 return ret; 1712 return ret;
1694 1713
@@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1715 SVGA3dCmdHeader header; 1734 SVGA3dCmdHeader header;
1716 SVGA3dCmdSetShader body; 1735 SVGA3dCmdSetShader body;
1717 } *cmd; 1736 } *cmd;
1718 struct vmw_resource_val_node *ctx_node; 1737 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1738 struct vmw_ctx_bindinfo bi;
1739 struct vmw_resource *res = NULL;
1719 int ret; 1740 int ret;
1720 1741
1721 cmd = container_of(header, struct vmw_set_shader_cmd, 1742 cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1727 if (unlikely(ret != 0)) 1748 if (unlikely(ret != 0))
1728 return ret; 1749 return ret;
1729 1750
1730 if (dev_priv->has_mob) { 1751 if (!dev_priv->has_mob)
1731 struct vmw_ctx_bindinfo bi; 1752 return 0;
1732 struct vmw_resource_val_node *res_node; 1753
1733 u32 shid = cmd->body.shid; 1754 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1734 1755 res = vmw_compat_shader_lookup
1735 if (shid != SVGA3D_INVALID_ID) 1756 (vmw_context_res_man(ctx_node->res),
1736 (void) vmw_compat_shader_lookup(sw_context->fp->shman, 1757 cmd->body.shid,
1737 cmd->body.type, 1758 cmd->body.type);
1738 &shid); 1759
1739 1760 if (!IS_ERR(res)) {
1740 ret = vmw_cmd_compat_res_check(dev_priv, sw_context, 1761 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1741 vmw_res_shader, 1762 vmw_res_shader,
1742 user_shader_converter, 1763 &cmd->body.shid, res,
1743 shid, 1764 &res_node);
1744 &cmd->body.shid, &res_node); 1765 vmw_resource_unreference(&res);
1766 if (unlikely(ret != 0))
1767 return ret;
1768 }
1769 }
1770
1771 if (!res_node) {
1772 ret = vmw_cmd_res_check(dev_priv, sw_context,
1773 vmw_res_shader,
1774 user_shader_converter,
1775 &cmd->body.shid, &res_node);
1745 if (unlikely(ret != 0)) 1776 if (unlikely(ret != 0))
1746 return ret; 1777 return ret;
1747
1748 bi.ctx = ctx_node->res;
1749 bi.res = res_node ? res_node->res : NULL;
1750 bi.bt = vmw_ctx_binding_shader;
1751 bi.i1.shader_type = cmd->body.type;
1752 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1753 } 1778 }
1754 1779
1755 return 0; 1780 bi.ctx = ctx_node->res;
1781 bi.res = res_node ? res_node->res : NULL;
1782 bi.bt = vmw_ctx_binding_shader;
1783 bi.i1.shader_type = cmd->body.type;
1784 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1756} 1785}
1757 1786
1758/** 1787/**
@@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2394 } 2423 }
2395} 2424}
2396 2425
2426
2427
2397int vmw_execbuf_process(struct drm_file *file_priv, 2428int vmw_execbuf_process(struct drm_file *file_priv,
2398 struct vmw_private *dev_priv, 2429 struct vmw_private *dev_priv,
2399 void __user *user_commands, 2430 void __user *user_commands,
@@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2453 goto out_unlock; 2484 goto out_unlock;
2454 sw_context->res_ht_initialized = true; 2485 sw_context->res_ht_initialized = true;
2455 } 2486 }
2456 INIT_LIST_HEAD(&sw_context->staged_shaders); 2487 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2457 2488
2458 INIT_LIST_HEAD(&resource_list); 2489 INIT_LIST_HEAD(&resource_list);
2459 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 2490 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2548 } 2579 }
2549 2580
2550 list_splice_init(&sw_context->resource_list, &resource_list); 2581 list_splice_init(&sw_context->resource_list, &resource_list);
2551 vmw_compat_shaders_commit(sw_context->fp->shman, 2582 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2552 &sw_context->staged_shaders);
2553 mutex_unlock(&dev_priv->cmdbuf_mutex); 2583 mutex_unlock(&dev_priv->cmdbuf_mutex);
2554 2584
2555 /* 2585 /*
@@ -2576,8 +2606,7 @@ out_unlock:
2576 list_splice_init(&sw_context->resource_list, &resource_list); 2606 list_splice_init(&sw_context->resource_list, &resource_list);
2577 error_resource = sw_context->error_resource; 2607 error_resource = sw_context->error_resource;
2578 sw_context->error_resource = NULL; 2608 sw_context->error_resource = NULL;
2579 vmw_compat_shaders_revert(sw_context->fp->shman, 2609 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2580 &sw_context->staged_shaders);
2581 mutex_unlock(&dev_priv->cmdbuf_mutex); 2610 mutex_unlock(&dev_priv->cmdbuf_mutex);
2582 2611
2583 /* 2612 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eeaffe9..8719fb3cccc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
34struct vmw_shader { 32struct vmw_shader {
35 struct vmw_resource res; 33 struct vmw_resource res;
36 SVGA3dShaderType type; 34 SVGA3dShaderType type;
@@ -42,49 +40,8 @@ struct vmw_user_shader {
42 struct vmw_shader shader; 40 struct vmw_shader shader;
43}; 41};
44 42
45/** 43static uint64_t vmw_user_shader_size;
46 * enum vmw_compat_shader_state - Staging state for compat shaders 44static uint64_t vmw_shader_size;
47 */
48enum vmw_compat_shader_state {
49 VMW_COMPAT_COMMITED,
50 VMW_COMPAT_ADD,
51 VMW_COMPAT_DEL
52};
53
54/**
55 * struct vmw_compat_shader - Metadata for compat shaders.
56 *
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
59 * with.
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
63 *
64 * The structure is protected by the cmdbuf lock.
65 */
66struct vmw_compat_shader {
67 u32 handle;
68 struct ttm_object_file *tfile;
69 struct drm_hash_item hash;
70 struct list_head head;
71 enum vmw_compat_shader_state state;
72};
73
74/**
75 * struct vmw_compat_shader_manager - Compat shader manager.
76 *
77 * @shaders: Hash table containing staged and commited compat shaders
78 * @list: List of commited shaders.
79 * @dev_priv: Pointer to a device private structure.
80 *
81 * @shaders and @list are protected by the cmdbuf mutex for now.
82 */
83struct vmw_compat_shader_manager {
84 struct drm_open_hash shaders;
85 struct list_head list;
86 struct vmw_private *dev_priv;
87};
88 45
89static void vmw_user_shader_free(struct vmw_resource *res); 46static void vmw_user_shader_free(struct vmw_resource *res);
90static struct vmw_resource * 47static struct vmw_resource *
@@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
98 struct ttm_validate_buffer *val_buf); 55 struct ttm_validate_buffer *val_buf);
99static int vmw_gb_shader_destroy(struct vmw_resource *res); 56static int vmw_gb_shader_destroy(struct vmw_resource *res);
100 57
101static uint64_t vmw_user_shader_size;
102
103static const struct vmw_user_resource_conv user_shader_conv = { 58static const struct vmw_user_resource_conv user_shader_conv = {
104 .object_type = VMW_RES_SHADER, 59 .object_type = VMW_RES_SHADER,
105 .base_obj_to_res = vmw_user_shader_base_to_res, 60 .base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
347 vmw_user_shader_size); 302 vmw_user_shader_size);
348} 303}
349 304
305static void vmw_shader_free(struct vmw_resource *res)
306{
307 struct vmw_shader *shader = vmw_res_to_shader(res);
308 struct vmw_private *dev_priv = res->dev_priv;
309
310 kfree(shader);
311 ttm_mem_global_free(vmw_mem_glob(dev_priv),
312 vmw_shader_size);
313}
314
350/** 315/**
351 * This function is called when user space has no more references on the 316 * This function is called when user space has no more references on the
352 * base object. It releases the base-object's reference on the resource object. 317 * base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
371 TTM_REF_USAGE); 336 TTM_REF_USAGE);
372} 337}
373 338
374static int vmw_shader_alloc(struct vmw_private *dev_priv, 339static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer, 340 struct vmw_dma_buffer *buffer,
376 size_t shader_size, 341 size_t shader_size,
377 size_t offset, 342 size_t offset,
378 SVGA3dShaderType shader_type, 343 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile, 344 struct ttm_object_file *tfile,
380 u32 *handle) 345 u32 *handle)
381{ 346{
382 struct vmw_user_shader *ushader; 347 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp; 348 struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@ out:
442} 407}
443 408
444 409
410struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
411 struct vmw_dma_buffer *buffer,
412 size_t shader_size,
413 size_t offset,
414 SVGA3dShaderType shader_type)
415{
416 struct vmw_shader *shader;
417 struct vmw_resource *res;
418 int ret;
419
420 /*
421 * Approximate idr memory usage with 128 bytes. It will be limited
422 * by maximum number_of shaders anyway.
423 */
424 if (unlikely(vmw_shader_size == 0))
425 vmw_shader_size =
426 ttm_round_pot(sizeof(struct vmw_shader)) + 128;
427
428 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
429 vmw_shader_size,
430 false, true);
431 if (unlikely(ret != 0)) {
432 if (ret != -ERESTARTSYS)
433 DRM_ERROR("Out of graphics memory for shader "
434 "creation.\n");
435 goto out_err;
436 }
437
438 shader = kzalloc(sizeof(*shader), GFP_KERNEL);
439 if (unlikely(shader == NULL)) {
440 ttm_mem_global_free(vmw_mem_glob(dev_priv),
441 vmw_shader_size);
442 ret = -ENOMEM;
443 goto out_err;
444 }
445
446 res = &shader->res;
447
448 /*
449 * From here on, the destructor takes over resource freeing.
450 */
451 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
452 offset, shader_type, buffer,
453 vmw_shader_free);
454
455out_err:
456 return ret ? ERR_PTR(ret) : res;
457}
458
459
445int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 460int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
446 struct drm_file *file_priv) 461 struct drm_file *file_priv)
447{ 462{
@@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
490 if (unlikely(ret != 0)) 505 if (unlikely(ret != 0))
491 goto out_bad_arg; 506 goto out_bad_arg;
492 507
493 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, 508 ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
494 shader_type, tfile, &arg->shader_handle); 509 shader_type, tfile, &arg->shader_handle);
495 510
496 ttm_read_unlock(&dev_priv->reservation_sem); 511 ttm_read_unlock(&dev_priv->reservation_sem);
497out_bad_arg: 512out_bad_arg:
@@ -500,202 +515,83 @@ out_bad_arg:
500} 515}
501 516
502/** 517/**
503 * vmw_compat_shader_lookup - Look up a compat shader 518 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
504 * 519 * shader type are within valid bounds.
505 * @man: Pointer to the compat shader manager.
506 * @shader_type: The shader type, that combined with the user_key identifies
507 * the shader.
508 * @user_key: On entry, this should be a pointer to the user_key.
509 * On successful exit, it will contain the guest-backed shader's TTM handle.
510 * 520 *
511 * Returns 0 on success. Non-zero on failure, in which case the value pointed 521 * @user_key: User space id of the shader.
512 * to by @user_key is unmodified. 522 * @shader_type: Shader type.
513 */
514int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
515 SVGA3dShaderType shader_type,
516 u32 *user_key)
517{
518 struct drm_hash_item *hash;
519 int ret;
520 unsigned long key = *user_key | (shader_type << 24);
521
522 ret = drm_ht_find_item(&man->shaders, key, &hash);
523 if (unlikely(ret != 0))
524 return ret;
525
526 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
527 hash)->handle;
528
529 return 0;
530}
531
532/**
533 * vmw_compat_shader_free - Free a compat shader.
534 *
535 * @man: Pointer to the compat shader manager.
536 * @entry: Pointer to a struct vmw_compat_shader.
537 *
538 * Frees a struct vmw_compat_shder entry and drops its reference to the
539 * guest backed shader.
540 */
541static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
542 struct vmw_compat_shader *entry)
543{
544 list_del(&entry->head);
545 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
546 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
547 TTM_REF_USAGE));
548 kfree(entry);
549}
550
551/**
552 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
553 *
554 * @man: Pointer to the compat shader manager.
555 * @list: Caller's list of compat shader actions.
556 * 523 *
557 * This function commits a list of compat shader additions or removals. 524 * Returns true if valid false if not.
558 * It is typically called when the execbuf ioctl call triggering these
559 * actions has commited the fifo contents to the device.
560 */ 525 */
561void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, 526static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
562 struct list_head *list)
563{ 527{
564 struct vmw_compat_shader *entry, *next; 528 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
565
566 list_for_each_entry_safe(entry, next, list, head) {
567 list_del(&entry->head);
568 switch (entry->state) {
569 case VMW_COMPAT_ADD:
570 entry->state = VMW_COMPAT_COMMITED;
571 list_add_tail(&entry->head, &man->list);
572 break;
573 case VMW_COMPAT_DEL:
574 ttm_ref_object_base_unref(entry->tfile, entry->handle,
575 TTM_REF_USAGE);
576 kfree(entry);
577 break;
578 default:
579 BUG();
580 break;
581 }
582 }
583} 529}
584 530
585/** 531/**
586 * vmw_compat_shaders_revert - Revert a list of compat shader actions 532 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
587 * 533 *
588 * @man: Pointer to the compat shader manager. 534 * @user_key: User space id of the shader.
589 * @list: Caller's list of compat shader actions. 535 * @shader_type: Shader type.
590 * 536 *
591 * This function reverts a list of compat shader additions or removals. 537 * Returns a hash key suitable for a command buffer managed resource
592 * It is typically called when the execbuf ioctl call triggering these 538 * manager hash table.
593 * actions failed for some reason, and the command stream was never
594 * submitted.
595 */ 539 */
596void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, 540static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
597 struct list_head *list)
598{ 541{
599 struct vmw_compat_shader *entry, *next; 542 return user_key | (shader_type << 20);
600 int ret;
601
602 list_for_each_entry_safe(entry, next, list, head) {
603 switch (entry->state) {
604 case VMW_COMPAT_ADD:
605 vmw_compat_shader_free(man, entry);
606 break;
607 case VMW_COMPAT_DEL:
608 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
609 list_del(&entry->head);
610 list_add_tail(&entry->head, &man->list);
611 entry->state = VMW_COMPAT_COMMITED;
612 break;
613 default:
614 BUG();
615 break;
616 }
617 }
618} 543}
619 544
620/** 545/**
621 * vmw_compat_shader_remove - Stage a compat shader for removal. 546 * vmw_compat_shader_remove - Stage a compat shader for removal.
622 * 547 *
623 * @man: Pointer to the compat shader manager 548 * @man: Pointer to the compat shader manager identifying the shader namespace.
624 * @user_key: The key that is used to identify the shader. The key is 549 * @user_key: The key that is used to identify the shader. The key is
625 * unique to the shader type. 550 * unique to the shader type.
626 * @shader_type: Shader type. 551 * @shader_type: Shader type.
627 * @list: Caller's list of staged shader actions. 552 * @list: Caller's list of staged command buffer resource actions.
628 *
629 * This function stages a compat shader for removal and removes the key from
630 * the shader manager's hash table. If the shader was previously only staged
631 * for addition it is completely removed (But the execbuf code may keep a
632 * reference if it was bound to a context between addition and removal). If
633 * it was previously commited to the manager, it is staged for removal.
634 */ 553 */
635int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, 554int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
636 u32 user_key, SVGA3dShaderType shader_type, 555 u32 user_key, SVGA3dShaderType shader_type,
637 struct list_head *list) 556 struct list_head *list)
638{ 557{
639 struct vmw_compat_shader *entry; 558 if (!vmw_compat_shader_id_ok(user_key, shader_type))
640 struct drm_hash_item *hash;
641 int ret;
642
643 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
644 &hash);
645 if (likely(ret != 0))
646 return -EINVAL; 559 return -EINVAL;
647 560
648 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); 561 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
649 562 vmw_compat_shader_key(user_key,
650 switch (entry->state) { 563 shader_type),
651 case VMW_COMPAT_ADD: 564 list);
652 vmw_compat_shader_free(man, entry);
653 break;
654 case VMW_COMPAT_COMMITED:
655 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
656 list_del(&entry->head);
657 entry->state = VMW_COMPAT_DEL;
658 list_add_tail(&entry->head, list);
659 break;
660 default:
661 BUG();
662 break;
663 }
664
665 return 0;
666} 565}
667 566
668/** 567/**
669 * vmw_compat_shader_add - Create a compat shader and add the 568 * vmw_compat_shader_add - Create a compat shader and stage it for addition
670 * key to the manager 569 * as a command buffer managed resource.
671 * 570 *
672 * @man: Pointer to the compat shader manager 571 * @man: Pointer to the compat shader manager identifying the shader namespace.
673 * @user_key: The key that is used to identify the shader. The key is 572 * @user_key: The key that is used to identify the shader. The key is
674 * unique to the shader type. 573 * unique to the shader type.
675 * @bytecode: Pointer to the bytecode of the shader. 574 * @bytecode: Pointer to the bytecode of the shader.
676 * @shader_type: Shader type. 575 * @shader_type: Shader type.
677 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is 576 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
678 * to be created with. 577 * to be created with.
679 * @list: Caller's list of staged shader actions. 578 * @list: Caller's list of staged command buffer resource actions.
680 * 579 *
681 * Note that only the key is added to the shader manager's hash table.
682 * The shader is not yet added to the shader manager's list of shaders.
683 */ 580 */
684int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, 581int vmw_compat_shader_add(struct vmw_private *dev_priv,
582 struct vmw_cmdbuf_res_manager *man,
685 u32 user_key, const void *bytecode, 583 u32 user_key, const void *bytecode,
686 SVGA3dShaderType shader_type, 584 SVGA3dShaderType shader_type,
687 size_t size, 585 size_t size,
688 struct ttm_object_file *tfile,
689 struct list_head *list) 586 struct list_head *list)
690{ 587{
691 struct vmw_dma_buffer *buf; 588 struct vmw_dma_buffer *buf;
692 struct ttm_bo_kmap_obj map; 589 struct ttm_bo_kmap_obj map;
693 bool is_iomem; 590 bool is_iomem;
694 struct vmw_compat_shader *compat;
695 u32 handle;
696 int ret; 591 int ret;
592 struct vmw_resource *res;
697 593
698 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) 594 if (!vmw_compat_shader_id_ok(user_key, shader_type))
699 return -EINVAL; 595 return -EINVAL;
700 596
701 /* Allocate and pin a DMA buffer */ 597 /* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
703 if (unlikely(buf == NULL)) 599 if (unlikely(buf == NULL))
704 return -ENOMEM; 600 return -ENOMEM;
705 601
706 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, 602 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
707 true, vmw_dmabuf_bo_free); 603 true, vmw_dmabuf_bo_free);
708 if (unlikely(ret != 0)) 604 if (unlikely(ret != 0))
709 goto out; 605 goto out;
@@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
728 WARN_ON(ret != 0); 624 WARN_ON(ret != 0);
729 ttm_bo_unreserve(&buf->base); 625 ttm_bo_unreserve(&buf->base);
730 626
731 /* Create a guest-backed shader container backed by the dma buffer */ 627 res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
732 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
733 tfile, &handle);
734 vmw_dmabuf_unreference(&buf);
735 if (unlikely(ret != 0)) 628 if (unlikely(ret != 0))
736 goto no_reserve; 629 goto no_reserve;
737 /*
738 * Create a compat shader structure and stage it for insertion
739 * in the manager
740 */
741 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
742 if (compat == NULL)
743 goto no_compat;
744
745 compat->hash.key = user_key | (shader_type << 24);
746 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
747 if (unlikely(ret != 0))
748 goto out_invalid_key;
749
750 compat->state = VMW_COMPAT_ADD;
751 compat->handle = handle;
752 compat->tfile = tfile;
753 list_add_tail(&compat->head, list);
754
755 return 0;
756 630
757out_invalid_key: 631 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
758 kfree(compat); 632 vmw_compat_shader_key(user_key, shader_type),
759no_compat: 633 res, list);
760 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); 634 vmw_resource_unreference(&res);
761no_reserve: 635no_reserve:
636 vmw_dmabuf_unreference(&buf);
762out: 637out:
763 return ret; 638 return ret;
764} 639}
765 640
766/** 641/**
767 * vmw_compat_shader_man_create - Create a compat shader manager 642 * vmw_compat_shader_lookup - Look up a compat shader
768 *
769 * @dev_priv: Pointer to a device private structure.
770 *
771 * Typically done at file open time. If successful returns a pointer to a
772 * compat shader manager. Otherwise returns an error pointer.
773 */
774struct vmw_compat_shader_manager *
775vmw_compat_shader_man_create(struct vmw_private *dev_priv)
776{
777 struct vmw_compat_shader_manager *man;
778 int ret;
779
780 man = kzalloc(sizeof(*man), GFP_KERNEL);
781 if (man == NULL)
782 return ERR_PTR(-ENOMEM);
783
784 man->dev_priv = dev_priv;
785 INIT_LIST_HEAD(&man->list);
786 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
787 if (ret == 0)
788 return man;
789
790 kfree(man);
791 return ERR_PTR(ret);
792}
793
794/**
795 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
796 * 643 *
797 * @man: Pointer to the shader manager to destroy. 644 * @man: Pointer to the command buffer managed resource manager identifying
645 * the shader namespace.
646 * @user_key: The user space id of the shader.
647 * @shader_type: The shader type.
798 * 648 *
799 * Typically done at file close time. 649 * Returns a refcounted pointer to a struct vmw_resource if the shader was
650 * found. An error pointer otherwise.
800 */ 651 */
801void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) 652struct vmw_resource *
653vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
654 u32 user_key,
655 SVGA3dShaderType shader_type)
802{ 656{
803 struct vmw_compat_shader *entry, *next; 657 if (!vmw_compat_shader_id_ok(user_key, shader_type))
804 658 return ERR_PTR(-EINVAL);
805 mutex_lock(&man->dev_priv->cmdbuf_mutex);
806 list_for_each_entry_safe(entry, next, &man->list, head)
807 vmw_compat_shader_free(man, entry);
808 659
809 mutex_unlock(&man->dev_priv->cmdbuf_mutex); 660 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
810 kfree(man); 661 vmw_compat_shader_key(user_key,
662 shader_type));
811} 663}