Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2019
1 file changed, 663 insertions, 1356 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
30 | #include <drm/ttm/ttm_object.h> | 30 | #include <drm/ttm/ttm_object.h> |
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | 33 | #include "vmwgfx_resource_priv.h" | |
34 | struct vmw_user_context { | ||
35 | struct ttm_base_object base; | ||
36 | struct vmw_resource res; | ||
37 | }; | ||
38 | |||
39 | struct vmw_user_surface { | ||
40 | struct ttm_base_object base; | ||
41 | struct vmw_surface srf; | ||
42 | uint32_t size; | ||
43 | }; | ||
44 | 34 | ||
45 | struct vmw_user_dma_buffer { | 35 | struct vmw_user_dma_buffer { |
46 | struct ttm_base_object base; | 36 | struct ttm_base_object base; |
@@ -62,17 +52,21 @@ struct vmw_user_stream {
62 | struct vmw_stream stream; | 52 | struct vmw_stream stream; |
63 | }; | 53 | }; |
64 | 54 | ||
65 | struct vmw_surface_offset { | ||
66 | uint32_t face; | ||
67 | uint32_t mip; | ||
68 | uint32_t bo_offset; | ||
69 | }; | ||
70 | 55 | ||
71 | |||
72 | static uint64_t vmw_user_context_size; | ||
73 | static uint64_t vmw_user_surface_size; | ||
74 | static uint64_t vmw_user_stream_size; | 56 | static uint64_t vmw_user_stream_size; |
75 | 57 | ||
58 | static const struct vmw_res_func vmw_stream_func = { | ||
59 | .res_type = vmw_res_stream, | ||
60 | .needs_backup = false, | ||
61 | .may_evict = false, | ||
62 | .type_name = "video streams", | ||
63 | .backup_placement = NULL, | ||
64 | .create = NULL, | ||
65 | .destroy = NULL, | ||
66 | .bind = NULL, | ||
67 | .unbind = NULL | ||
68 | }; | ||
69 | |||
76 | static inline struct vmw_dma_buffer * | 70 | static inline struct vmw_dma_buffer * |
77 | vmw_dma_buffer(struct ttm_buffer_object *bo) | 71 | vmw_dma_buffer(struct ttm_buffer_object *bo) |
78 | { | 72 | { |
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
100 | * | 94 | * |
101 | * Release the resource id to the resource id manager and set it to -1 | 95 | * Release the resource id to the resource id manager and set it to -1 |
102 | */ | 96 | */ |
103 | static void vmw_resource_release_id(struct vmw_resource *res) | 97 | void vmw_resource_release_id(struct vmw_resource *res) |
104 | { | 98 | { |
105 | struct vmw_private *dev_priv = res->dev_priv; | 99 | struct vmw_private *dev_priv = res->dev_priv; |
100 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; | ||
106 | 101 | ||
107 | write_lock(&dev_priv->resource_lock); | 102 | write_lock(&dev_priv->resource_lock); |
108 | if (res->id != -1) | 103 | if (res->id != -1) |
109 | idr_remove(res->idr, res->id); | 104 | idr_remove(idr, res->id); |
110 | res->id = -1; | 105 | res->id = -1; |
111 | write_unlock(&dev_priv->resource_lock); | 106 | write_unlock(&dev_priv->resource_lock); |
112 | } | 107 | } |
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
116 | struct vmw_resource *res = | 111 | struct vmw_resource *res = |
117 | container_of(kref, struct vmw_resource, kref); | 112 | container_of(kref, struct vmw_resource, kref); |
118 | struct vmw_private *dev_priv = res->dev_priv; | 113 | struct vmw_private *dev_priv = res->dev_priv; |
119 | int id = res->id; | 114 | int id; |
120 | struct idr *idr = res->idr; | 115 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; |
121 | 116 | ||
122 | res->avail = false; | 117 | res->avail = false; |
123 | if (res->remove_from_lists != NULL) | 118 | list_del_init(&res->lru_head); |
124 | res->remove_from_lists(res); | ||
125 | write_unlock(&dev_priv->resource_lock); | 119 | write_unlock(&dev_priv->resource_lock); |
120 | if (res->backup) { | ||
121 | struct ttm_buffer_object *bo = &res->backup->base; | ||
122 | |||
123 | ttm_bo_reserve(bo, false, false, false, 0); | ||
124 | if (!list_empty(&res->mob_head) && | ||
125 | res->func->unbind != NULL) { | ||
126 | struct ttm_validate_buffer val_buf; | ||
127 | |||
128 | val_buf.bo = bo; | ||
129 | res->func->unbind(res, false, &val_buf); | ||
130 | } | ||
131 | res->backup_dirty = false; | ||
132 | list_del_init(&res->mob_head); | ||
133 | ttm_bo_unreserve(bo); | ||
134 | vmw_dmabuf_unreference(&res->backup); | ||
135 | } | ||
126 | 136 | ||
127 | if (likely(res->hw_destroy != NULL)) | 137 | if (likely(res->hw_destroy != NULL)) |
128 | res->hw_destroy(res); | 138 | res->hw_destroy(res); |
129 | 139 | ||
140 | id = res->id; | ||
130 | if (res->res_free != NULL) | 141 | if (res->res_free != NULL) |
131 | res->res_free(res); | 142 | res->res_free(res); |
132 | else | 143 | else |
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
153 | /** | 164 | /** |
154 | * vmw_resource_alloc_id - release a resource id to the id manager. | 165 | * vmw_resource_alloc_id - release a resource id to the id manager. |
155 | * | 166 | * |
156 | * @dev_priv: Pointer to the device private structure. | ||
157 | * @res: Pointer to the resource. | 167 | * @res: Pointer to the resource. |
158 | * | 168 | * |
159 | * Allocate the lowest free resource from the resource manager, and set | 169 | * Allocate the lowest free resource from the resource manager, and set |
160 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. | 170 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. |
161 | */ | 171 | */ |
162 | static int vmw_resource_alloc_id(struct vmw_private *dev_priv, | 172 | int vmw_resource_alloc_id(struct vmw_resource *res) |
163 | struct vmw_resource *res) | ||
164 | { | 173 | { |
174 | struct vmw_private *dev_priv = res->dev_priv; | ||
165 | int ret; | 175 | int ret; |
176 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; | ||
166 | 177 | ||
167 | BUG_ON(res->id != -1); | 178 | BUG_ON(res->id != -1); |
168 | 179 | ||
169 | do { | 180 | do { |
170 | if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) | 181 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) |
171 | return -ENOMEM; | 182 | return -ENOMEM; |
172 | 183 | ||
173 | write_lock(&dev_priv->resource_lock); | 184 | write_lock(&dev_priv->resource_lock); |
174 | ret = idr_get_new_above(res->idr, res, 1, &res->id); | 185 | ret = idr_get_new_above(idr, res, 1, &res->id); |
175 | write_unlock(&dev_priv->resource_lock); | 186 | write_unlock(&dev_priv->resource_lock); |
176 | 187 | ||
177 | } while (ret == -EAGAIN); | 188 | } while (ret == -EAGAIN); |
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
179 | return ret; | 190 | return ret; |
180 | } | 191 | } |
181 | 192 | ||
182 | 193 | /** | |
183 | static int vmw_resource_init(struct vmw_private *dev_priv, | 194 | * vmw_resource_init - initialize a struct vmw_resource |
184 | struct vmw_resource *res, | 195 | * |
185 | struct idr *idr, | 196 | * @dev_priv: Pointer to a device private struct. |
186 | enum ttm_object_type obj_type, | 197 | * @res: The struct vmw_resource to initialize. |
187 | bool delay_id, | 198 | * @obj_type: Resource object type. |
188 | void (*res_free) (struct vmw_resource *res), | 199 | * @delay_id: Boolean whether to defer device id allocation until |
189 | void (*remove_from_lists) | 200 | * the first validation. |
190 | (struct vmw_resource *res)) | 201 | * @res_free: Resource destructor. |
202 | * @func: Resource function table. | ||
203 | */ | ||
204 | int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | ||
205 | bool delay_id, | ||
206 | void (*res_free) (struct vmw_resource *res), | ||
207 | const struct vmw_res_func *func) | ||
191 | { | 208 | { |
192 | kref_init(&res->kref); | 209 | kref_init(&res->kref); |
193 | res->hw_destroy = NULL; | 210 | res->hw_destroy = NULL; |
194 | res->res_free = res_free; | 211 | res->res_free = res_free; |
195 | res->remove_from_lists = remove_from_lists; | ||
196 | res->res_type = obj_type; | ||
197 | res->idr = idr; | ||
198 | res->avail = false; | 212 | res->avail = false; |
199 | res->dev_priv = dev_priv; | 213 | res->dev_priv = dev_priv; |
200 | INIT_LIST_HEAD(&res->query_head); | 214 | res->func = func; |
201 | INIT_LIST_HEAD(&res->validate_head); | 215 | INIT_LIST_HEAD(&res->lru_head); |
216 | INIT_LIST_HEAD(&res->mob_head); | ||
202 | res->id = -1; | 217 | res->id = -1; |
218 | res->backup = NULL; | ||
219 | res->backup_offset = 0; | ||
220 | res->backup_dirty = false; | ||
221 | res->res_dirty = false; | ||
203 | if (delay_id) | 222 | if (delay_id) |
204 | return 0; | 223 | return 0; |
205 | else | 224 | else |
206 | return vmw_resource_alloc_id(dev_priv, res); | 225 | return vmw_resource_alloc_id(res); |
207 | } | 226 | } |
208 | 227 | ||
209 | /** | 228 | /** |
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
218 | * Activate basically means that the function vmw_resource_lookup will | 237 | * Activate basically means that the function vmw_resource_lookup will |
219 | * find it. | 238 | * find it. |
220 | */ | 239 | */ |
221 | 240 | void vmw_resource_activate(struct vmw_resource *res, | |
222 | static void vmw_resource_activate(struct vmw_resource *res, | 241 | void (*hw_destroy) (struct vmw_resource *)) |
223 | void (*hw_destroy) (struct vmw_resource *)) | ||
224 | { | 242 | { |
225 | struct vmw_private *dev_priv = res->dev_priv; | 243 | struct vmw_private *dev_priv = res->dev_priv; |
226 | 244 | ||
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
250 | } | 268 | } |
251 | 269 | ||
252 | /** | 270 | /** |
253 | * Context management: | 271 | * vmw_user_resource_lookup_handle - lookup a struct resource from a |
254 | */ | 272 | * TTM user-space handle and perform basic type checks |
255 | |||
256 | static void vmw_hw_context_destroy(struct vmw_resource *res) | ||
257 | { | ||
258 | |||
259 | struct vmw_private *dev_priv = res->dev_priv; | ||
260 | struct { | ||
261 | SVGA3dCmdHeader header; | ||
262 | SVGA3dCmdDestroyContext body; | ||
263 | } *cmd; | ||
264 | |||
265 | |||
266 | vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); | ||
267 | |||
268 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
269 | if (unlikely(cmd == NULL)) { | ||
270 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
271 | "destruction.\n"); | ||
272 | return; | ||
273 | } | ||
274 | |||
275 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); | ||
276 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
277 | cmd->body.cid = cpu_to_le32(res->id); | ||
278 | |||
279 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
280 | vmw_3d_resource_dec(dev_priv, false); | ||
281 | } | ||
282 | |||
283 | static int vmw_context_init(struct vmw_private *dev_priv, | ||
284 | struct vmw_resource *res, | ||
285 | void (*res_free) (struct vmw_resource *res)) | ||
286 | { | ||
287 | int ret; | ||
288 | |||
289 | struct { | ||
290 | SVGA3dCmdHeader header; | ||
291 | SVGA3dCmdDefineContext body; | ||
292 | } *cmd; | ||
293 | |||
294 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | ||
295 | VMW_RES_CONTEXT, false, res_free, NULL); | ||
296 | |||
297 | if (unlikely(ret != 0)) { | ||
298 | DRM_ERROR("Failed to allocate a resource id.\n"); | ||
299 | goto out_early; | ||
300 | } | ||
301 | |||
302 | if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { | ||
303 | DRM_ERROR("Out of hw context ids.\n"); | ||
304 | vmw_resource_unreference(&res); | ||
305 | return -ENOMEM; | ||
306 | } | ||
307 | |||
308 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
309 | if (unlikely(cmd == NULL)) { | ||
310 | DRM_ERROR("Fifo reserve failed.\n"); | ||
311 | vmw_resource_unreference(&res); | ||
312 | return -ENOMEM; | ||
313 | } | ||
314 | |||
315 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); | ||
316 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
317 | cmd->body.cid = cpu_to_le32(res->id); | ||
318 | |||
319 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
320 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
321 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
322 | return 0; | ||
323 | |||
324 | out_early: | ||
325 | if (res_free == NULL) | ||
326 | kfree(res); | ||
327 | else | ||
328 | res_free(res); | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | ||
333 | { | ||
334 | struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
335 | int ret; | ||
336 | |||
337 | if (unlikely(res == NULL)) | ||
338 | return NULL; | ||
339 | |||
340 | ret = vmw_context_init(dev_priv, res, NULL); | ||
341 | return (ret == 0) ? res : NULL; | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * User-space context management: | ||
346 | */ | ||
347 | |||
348 | static void vmw_user_context_free(struct vmw_resource *res) | ||
349 | { | ||
350 | struct vmw_user_context *ctx = | ||
351 | container_of(res, struct vmw_user_context, res); | ||
352 | struct vmw_private *dev_priv = res->dev_priv; | ||
353 | |||
354 | kfree(ctx); | ||
355 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
356 | vmw_user_context_size); | ||
357 | } | ||
358 | |||
359 | /** | ||
360 | * This function is called when user space has no more references on the | ||
361 | * base object. It releases the base-object's reference on the resource object. | ||
362 | */ | ||
363 | |||
364 | static void vmw_user_context_base_release(struct ttm_base_object **p_base) | ||
365 | { | ||
366 | struct ttm_base_object *base = *p_base; | ||
367 | struct vmw_user_context *ctx = | ||
368 | container_of(base, struct vmw_user_context, base); | ||
369 | struct vmw_resource *res = &ctx->res; | ||
370 | |||
371 | *p_base = NULL; | ||
372 | vmw_resource_unreference(&res); | ||
373 | } | ||
374 | |||
375 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
376 | struct drm_file *file_priv) | ||
377 | { | ||
378 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
379 | struct vmw_resource *res; | ||
380 | struct vmw_user_context *ctx; | ||
381 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
382 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
383 | int ret = 0; | ||
384 | |||
385 | res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); | ||
386 | if (unlikely(res == NULL)) | ||
387 | return -EINVAL; | ||
388 | |||
389 | if (res->res_free != &vmw_user_context_free) { | ||
390 | ret = -EINVAL; | ||
391 | goto out; | ||
392 | } | ||
393 | |||
394 | ctx = container_of(res, struct vmw_user_context, res); | ||
395 | if (ctx->base.tfile != tfile && !ctx->base.shareable) { | ||
396 | ret = -EPERM; | ||
397 | goto out; | ||
398 | } | ||
399 | |||
400 | ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); | ||
401 | out: | ||
402 | vmw_resource_unreference(&res); | ||
403 | return ret; | ||
404 | } | ||
405 | |||
406 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
407 | struct drm_file *file_priv) | ||
408 | { | ||
409 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
410 | struct vmw_user_context *ctx; | ||
411 | struct vmw_resource *res; | ||
412 | struct vmw_resource *tmp; | ||
413 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
414 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
415 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
416 | int ret; | ||
417 | |||
418 | |||
419 | /* | ||
420 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
421 | * by maximum number_of contexts anyway. | ||
422 | */ | ||
423 | |||
424 | if (unlikely(vmw_user_context_size == 0)) | ||
425 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; | ||
426 | |||
427 | ret = ttm_read_lock(&vmaster->lock, true); | ||
428 | if (unlikely(ret != 0)) | ||
429 | return ret; | ||
430 | |||
431 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
432 | vmw_user_context_size, | ||
433 | false, true); | ||
434 | if (unlikely(ret != 0)) { | ||
435 | if (ret != -ERESTARTSYS) | ||
436 | DRM_ERROR("Out of graphics memory for context" | ||
437 | " creation.\n"); | ||
438 | goto out_unlock; | ||
439 | } | ||
440 | |||
441 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | ||
442 | if (unlikely(ctx == NULL)) { | ||
443 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
444 | vmw_user_context_size); | ||
445 | ret = -ENOMEM; | ||
446 | goto out_unlock; | ||
447 | } | ||
448 | |||
449 | res = &ctx->res; | ||
450 | ctx->base.shareable = false; | ||
451 | ctx->base.tfile = NULL; | ||
452 | |||
453 | /* | ||
454 | * From here on, the destructor takes over resource freeing. | ||
455 | */ | ||
456 | |||
457 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); | ||
458 | if (unlikely(ret != 0)) | ||
459 | goto out_unlock; | ||
460 | |||
461 | tmp = vmw_resource_reference(&ctx->res); | ||
462 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, | ||
463 | &vmw_user_context_base_release, NULL); | ||
464 | |||
465 | if (unlikely(ret != 0)) { | ||
466 | vmw_resource_unreference(&tmp); | ||
467 | goto out_err; | ||
468 | } | ||
469 | |||
470 | arg->cid = res->id; | ||
471 | out_err: | ||
472 | vmw_resource_unreference(&res); | ||
473 | out_unlock: | ||
474 | ttm_read_unlock(&vmaster->lock); | ||
475 | return ret; | ||
476 | |||
477 | } | ||
478 | |||
479 | int vmw_context_check(struct vmw_private *dev_priv, | ||
480 | struct ttm_object_file *tfile, | ||
481 | int id, | ||
482 | struct vmw_resource **p_res) | ||
483 | { | ||
484 | struct vmw_resource *res; | ||
485 | int ret = 0; | ||
486 | |||
487 | read_lock(&dev_priv->resource_lock); | ||
488 | res = idr_find(&dev_priv->context_idr, id); | ||
489 | if (res && res->avail) { | ||
490 | struct vmw_user_context *ctx = | ||
491 | container_of(res, struct vmw_user_context, res); | ||
492 | if (ctx->base.tfile != tfile && !ctx->base.shareable) | ||
493 | ret = -EPERM; | ||
494 | if (p_res) | ||
495 | *p_res = vmw_resource_reference(res); | ||
496 | } else | ||
497 | ret = -EINVAL; | ||
498 | read_unlock(&dev_priv->resource_lock); | ||
499 | |||
500 | return ret; | ||
501 | } | ||
502 | |||
503 | struct vmw_bpp { | ||
504 | uint8_t bpp; | ||
505 | uint8_t s_bpp; | ||
506 | }; | ||
507 | |||
508 | /* | ||
509 | * Size table for the supported SVGA3D surface formats. It consists of | ||
510 | * two values. The bpp value and the s_bpp value which is short for | ||
511 | * "stride bits per pixel" The values are given in such a way that the | ||
512 | * minimum stride for the image is calculated using | ||
513 | * | ||
514 | * min_stride = w*s_bpp | ||
515 | * | ||
516 | * and the total memory requirement for the image is | ||
517 | * | ||
518 | * h*min_stride*bpp/s_bpp | ||
519 | * | ||
520 | */ | ||
521 | static const struct vmw_bpp vmw_sf_bpp[] = { | ||
522 | [SVGA3D_FORMAT_INVALID] = {0, 0}, | ||
523 | [SVGA3D_X8R8G8B8] = {32, 32}, | ||
524 | [SVGA3D_A8R8G8B8] = {32, 32}, | ||
525 | [SVGA3D_R5G6B5] = {16, 16}, | ||
526 | [SVGA3D_X1R5G5B5] = {16, 16}, | ||
527 | [SVGA3D_A1R5G5B5] = {16, 16}, | ||
528 | [SVGA3D_A4R4G4B4] = {16, 16}, | ||
529 | [SVGA3D_Z_D32] = {32, 32}, | ||
530 | [SVGA3D_Z_D16] = {16, 16}, | ||
531 | [SVGA3D_Z_D24S8] = {32, 32}, | ||
532 | [SVGA3D_Z_D15S1] = {16, 16}, | ||
533 | [SVGA3D_LUMINANCE8] = {8, 8}, | ||
534 | [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, | ||
535 | [SVGA3D_LUMINANCE16] = {16, 16}, | ||
536 | [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, | ||
537 | [SVGA3D_DXT1] = {4, 16}, | ||
538 | [SVGA3D_DXT2] = {8, 32}, | ||
539 | [SVGA3D_DXT3] = {8, 32}, | ||
540 | [SVGA3D_DXT4] = {8, 32}, | ||
541 | [SVGA3D_DXT5] = {8, 32}, | ||
542 | [SVGA3D_BUMPU8V8] = {16, 16}, | ||
543 | [SVGA3D_BUMPL6V5U5] = {16, 16}, | ||
544 | [SVGA3D_BUMPX8L8V8U8] = {32, 32}, | ||
545 | [SVGA3D_ARGB_S10E5] = {16, 16}, | ||
546 | [SVGA3D_ARGB_S23E8] = {32, 32}, | ||
547 | [SVGA3D_A2R10G10B10] = {32, 32}, | ||
548 | [SVGA3D_V8U8] = {16, 16}, | ||
549 | [SVGA3D_Q8W8V8U8] = {32, 32}, | ||
550 | [SVGA3D_CxV8U8] = {16, 16}, | ||
551 | [SVGA3D_X8L8V8U8] = {32, 32}, | ||
552 | [SVGA3D_A2W10V10U10] = {32, 32}, | ||
553 | [SVGA3D_ALPHA8] = {8, 8}, | ||
554 | [SVGA3D_R_S10E5] = {16, 16}, | ||
555 | [SVGA3D_R_S23E8] = {32, 32}, | ||
556 | [SVGA3D_RG_S10E5] = {16, 16}, | ||
557 | [SVGA3D_RG_S23E8] = {32, 32}, | ||
558 | [SVGA3D_BUFFER] = {8, 8}, | ||
559 | [SVGA3D_Z_D24X8] = {32, 32}, | ||
560 | [SVGA3D_V16U16] = {32, 32}, | ||
561 | [SVGA3D_G16R16] = {32, 32}, | ||
562 | [SVGA3D_A16B16G16R16] = {64, 64}, | ||
563 | [SVGA3D_UYVY] = {12, 12}, | ||
564 | [SVGA3D_YUY2] = {12, 12}, | ||
565 | [SVGA3D_NV12] = {12, 8}, | ||
566 | [SVGA3D_AYUV] = {32, 32}, | ||
567 | [SVGA3D_BC4_UNORM] = {4, 16}, | ||
568 | [SVGA3D_BC5_UNORM] = {8, 32}, | ||
569 | [SVGA3D_Z_DF16] = {16, 16}, | ||
570 | [SVGA3D_Z_DF24] = {24, 24}, | ||
571 | [SVGA3D_Z_D24S8_INT] = {32, 32} | ||
572 | }; | ||
573 | |||
574 | |||
575 | /** | ||
576 | * Surface management. | ||
577 | */ | ||
578 | |||
579 | struct vmw_surface_dma { | ||
580 | SVGA3dCmdHeader header; | ||
581 | SVGA3dCmdSurfaceDMA body; | ||
582 | SVGA3dCopyBox cb; | ||
583 | SVGA3dCmdSurfaceDMASuffix suffix; | ||
584 | }; | ||
585 | |||
586 | struct vmw_surface_define { | ||
587 | SVGA3dCmdHeader header; | ||
588 | SVGA3dCmdDefineSurface body; | ||
589 | }; | ||
590 | |||
591 | struct vmw_surface_destroy { | ||
592 | SVGA3dCmdHeader header; | ||
593 | SVGA3dCmdDestroySurface body; | ||
594 | }; | ||
595 | |||
596 | |||
597 | /** | ||
598 | * vmw_surface_dma_size - Compute fifo size for a dma command. | ||
599 | * | ||
600 | * @srf: Pointer to a struct vmw_surface | ||
601 | * | ||
602 | * Computes the required size for a surface dma command for backup or | ||
603 | * restoration of the surface represented by @srf. | ||
604 | */ | ||
605 | static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) | ||
606 | { | ||
607 | return srf->num_sizes * sizeof(struct vmw_surface_dma); | ||
608 | } | ||
609 | |||
610 | |||
611 | /** | ||
612 | * vmw_surface_define_size - Compute fifo size for a surface define command. | ||
613 | * | ||
614 | * @srf: Pointer to a struct vmw_surface | ||
615 | * | ||
616 | * Computes the required size for a surface define command for the definition | ||
617 | * of the surface represented by @srf. | ||
618 | */ | ||
619 | static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) | ||
620 | { | ||
621 | return sizeof(struct vmw_surface_define) + srf->num_sizes * | ||
622 | sizeof(SVGA3dSize); | ||
623 | } | ||
624 | |||
625 | |||
626 | /** | ||
627 | * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. | ||
628 | * | 273 | * |
629 | * Computes the required size for a surface destroy command for the destruction | 274 | * @dev_priv: Pointer to a device private struct |
630 | * of a hw surface. | 275 | * @tfile: Pointer to a struct ttm_object_file identifying the caller |
631 | */ | 276 | * @handle: The TTM user-space handle |
632 | static inline uint32_t vmw_surface_destroy_size(void) | 277 | * @converter: Pointer to an object describing the resource type |
633 | { | 278 | * @p_res: On successful return the location pointed to will contain |
634 | return sizeof(struct vmw_surface_destroy); | 279 | * a pointer to a refcounted struct vmw_resource. |
635 | } | ||
636 | |||
637 | /** | ||
638 | * vmw_surface_destroy_encode - Encode a surface_destroy command. | ||
639 | * | ||
640 | * @id: The surface id | ||
641 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
642 | */ | ||
643 | static void vmw_surface_destroy_encode(uint32_t id, | ||
644 | void *cmd_space) | ||
645 | { | ||
646 | struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) | ||
647 | cmd_space; | ||
648 | |||
649 | cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; | ||
650 | cmd->header.size = sizeof(cmd->body); | ||
651 | cmd->body.sid = id; | ||
652 | } | ||
653 | |||
654 | /** | ||
655 | * vmw_surface_define_encode - Encode a surface_define command. | ||
656 | * | 280 | * |
657 | * @srf: Pointer to a struct vmw_surface object. | 281 | * If the handle can't be found or is associated with an incorrect resource |
658 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | 282 | * type, -EINVAL will be returned. |
659 | */ | 283 | */ |
660 | static void vmw_surface_define_encode(const struct vmw_surface *srf, | 284 | int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, |
661 | void *cmd_space) | 285 | struct ttm_object_file *tfile, |
286 | uint32_t handle, | ||
287 | const struct vmw_user_resource_conv | ||
288 | *converter, | ||
289 | struct vmw_resource **p_res) | ||
662 | { | 290 | { |
663 | struct vmw_surface_define *cmd = (struct vmw_surface_define *) | ||
664 | cmd_space; | ||
665 | struct drm_vmw_size *src_size; | ||
666 | SVGA3dSize *cmd_size; | ||
667 | uint32_t cmd_len; | ||
668 | int i; | ||
669 | |||
670 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
671 | |||
672 | cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; | ||
673 | cmd->header.size = cmd_len; | ||
674 | cmd->body.sid = srf->res.id; | ||
675 | cmd->body.surfaceFlags = srf->flags; | ||
676 | cmd->body.format = cpu_to_le32(srf->format); | ||
677 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
678 | cmd->body.face[i].numMipLevels = srf->mip_levels[i]; | ||
679 | |||
680 | cmd += 1; | ||
681 | cmd_size = (SVGA3dSize *) cmd; | ||
682 | src_size = srf->sizes; | ||
683 | |||
684 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
685 | cmd_size->width = src_size->width; | ||
686 | cmd_size->height = src_size->height; | ||
687 | cmd_size->depth = src_size->depth; | ||
688 | } | ||
689 | } | ||
690 | |||
691 | |||
692 | /** | ||
693 | * vmw_surface_dma_encode - Encode a surface_dma command. | ||
694 | * | ||
695 | * @srf: Pointer to a struct vmw_surface object. | ||
696 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
697 | * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents | ||
698 | * should be placed or read from. | ||
699 | * @to_surface: Boolean whether to DMA to the surface or from the surface. | ||
700 | */ | ||
701 | static void vmw_surface_dma_encode(struct vmw_surface *srf, | ||
702 | void *cmd_space, | ||
703 | const SVGAGuestPtr *ptr, | ||
704 | bool to_surface) | ||
705 | { | ||
706 | uint32_t i; | ||
707 | uint32_t bpp = vmw_sf_bpp[srf->format].bpp; | ||
708 | uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
709 | struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; | ||
710 | |||
711 | for (i = 0; i < srf->num_sizes; ++i) { | ||
712 | SVGA3dCmdHeader *header = &cmd->header; | ||
713 | SVGA3dCmdSurfaceDMA *body = &cmd->body; | ||
714 | SVGA3dCopyBox *cb = &cmd->cb; | ||
715 | SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; | ||
716 | const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; | ||
717 | const struct drm_vmw_size *cur_size = &srf->sizes[i]; | ||
718 | |||
719 | header->id = SVGA_3D_CMD_SURFACE_DMA; | ||
720 | header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); | ||
721 | |||
722 | body->guest.ptr = *ptr; | ||
723 | body->guest.ptr.offset += cur_offset->bo_offset; | ||
724 | body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; | ||
725 | body->host.sid = srf->res.id; | ||
726 | body->host.face = cur_offset->face; | ||
727 | body->host.mipmap = cur_offset->mip; | ||
728 | body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : | ||
729 | SVGA3D_READ_HOST_VRAM); | ||
730 | cb->x = 0; | ||
731 | cb->y = 0; | ||
732 | cb->z = 0; | ||
733 | cb->srcx = 0; | ||
734 | cb->srcy = 0; | ||
735 | cb->srcz = 0; | ||
736 | cb->w = cur_size->width; | ||
737 | cb->h = cur_size->height; | ||
738 | cb->d = cur_size->depth; | ||
739 | |||
740 | suffix->suffixSize = sizeof(*suffix); | ||
741 | suffix->maximumOffset = body->guest.pitch*cur_size->height* | ||
742 | cur_size->depth*bpp / stride_bpp; | ||
743 | suffix->flags.discard = 0; | ||
744 | suffix->flags.unsynchronized = 0; | ||
745 | suffix->flags.reserved = 0; | ||
746 | ++cmd; | ||
747 | } | ||
748 | }; | ||
749 | |||
750 | |||
751 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | ||
752 | { | ||
753 | |||
754 | struct vmw_private *dev_priv = res->dev_priv; | ||
755 | struct vmw_surface *srf; | ||
756 | void *cmd; | ||
757 | |||
758 | if (res->id != -1) { | ||
759 | |||
760 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | ||
761 | if (unlikely(cmd == NULL)) { | ||
762 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
763 | "destruction.\n"); | ||
764 | return; | ||
765 | } | ||
766 | |||
767 | vmw_surface_destroy_encode(res->id, cmd); | ||
768 | vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); | ||
769 | |||
770 | /* | ||
771 | * used_memory_size_atomic, or separate lock | ||
772 | * to avoid taking dev_priv::cmdbuf_mutex in | ||
773 | * the destroy path. | ||
774 | */ | ||
775 | |||
776 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
777 | srf = container_of(res, struct vmw_surface, res); | ||
778 | dev_priv->used_memory_size -= srf->backup_size; | ||
779 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
780 | |||
781 | } | ||
782 | vmw_3d_resource_dec(dev_priv, false); | ||
783 | } | ||
784 | |||
785 | void vmw_surface_res_free(struct vmw_resource *res) | ||
786 | { | ||
787 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
788 | |||
789 | if (srf->backup) | ||
790 | ttm_bo_unref(&srf->backup); | ||
791 | kfree(srf->offsets); | ||
792 | kfree(srf->sizes); | ||
793 | kfree(srf->snooper.image); | ||
794 | kfree(srf); | ||
795 | } | ||
796 | |||
797 | |||
798 | /** | ||
799 | * vmw_surface_do_validate - make a surface available to the device. | ||
800 | * | ||
801 | * @dev_priv: Pointer to a device private struct. | ||
802 | * @srf: Pointer to a struct vmw_surface. | ||
803 | * | ||
804 | * If the surface doesn't have a hw id, allocate one, and optionally | ||
805 | * DMA the backed up surface contents to the device. | ||
806 | * | ||
807 | * Returns -EBUSY if there wasn't sufficient device resources to | ||
808 | * complete the validation. Retry after freeing up resources. | ||
809 | * | ||
810 | * May return other errors if the kernel is out of guest resources. | ||
811 | */ | ||
812 | int vmw_surface_do_validate(struct vmw_private *dev_priv, | ||
813 | struct vmw_surface *srf) | ||
814 | { | ||
815 | struct vmw_resource *res = &srf->res; | ||
816 | struct list_head val_list; | ||
817 | struct ttm_validate_buffer val_buf; | ||
818 | uint32_t submit_size; | ||
819 | uint8_t *cmd; | ||
820 | int ret; | ||
821 | |||
822 | if (likely(res->id != -1)) | ||
823 | return 0; | ||
824 | |||
825 | if (unlikely(dev_priv->used_memory_size + srf->backup_size >= | ||
826 | dev_priv->memory_size)) | ||
827 | return -EBUSY; | ||
828 | |||
829 | /* | ||
830 | * Reserve- and validate the backup DMA bo. | ||
831 | */ | ||
832 | |||
833 | if (srf->backup) { | ||
834 | INIT_LIST_HEAD(&val_list); | ||
835 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
836 | val_buf.new_sync_obj_arg = (void *)((unsigned long) | ||
837 | DRM_VMW_FENCE_FLAG_EXEC); | ||
838 | list_add_tail(&val_buf.head, &val_list); | ||
839 | ret = ttm_eu_reserve_buffers(&val_list); | ||
840 | if (unlikely(ret != 0)) | ||
841 | goto out_no_reserve; | ||
842 | |||
843 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
844 | true, false, false); | ||
845 | if (unlikely(ret != 0)) | ||
846 | goto out_no_validate; | ||
847 | } | ||
848 | |||
849 | /* | ||
850 | * Alloc id for the resource. | ||
851 | */ | ||
852 | |||
853 | ret = vmw_resource_alloc_id(dev_priv, res); | ||
854 | if (unlikely(ret != 0)) { | ||
855 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
856 | goto out_no_id; | ||
857 | } | ||
858 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { | ||
859 | ret = -EBUSY; | ||
860 | goto out_no_fifo; | ||
861 | } | ||
862 | |||
863 | |||
864 | /* | ||
865 | * Encode surface define- and dma commands. | ||
866 | */ | ||
867 | |||
868 | submit_size = vmw_surface_define_size(srf); | ||
869 | if (srf->backup) | ||
870 | submit_size += vmw_surface_dma_size(srf); | ||
871 | |||
872 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
873 | if (unlikely(cmd == NULL)) { | ||
874 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
875 | "validation.\n"); | ||
876 | ret = -ENOMEM; | ||
877 | goto out_no_fifo; | ||
878 | } | ||
879 | |||
880 | vmw_surface_define_encode(srf, cmd); | ||
881 | if (srf->backup) { | ||
882 | SVGAGuestPtr ptr; | ||
883 | |||
884 | cmd += vmw_surface_define_size(srf); | ||
885 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
886 | vmw_surface_dma_encode(srf, cmd, &ptr, true); | ||
887 | } | ||
888 | |||
889 | vmw_fifo_commit(dev_priv, submit_size); | ||
890 | |||
891 | /* | ||
892 | * Create a fence object and fence the backup buffer. | ||
893 | */ | ||
894 | |||
895 | if (srf->backup) { | ||
896 | struct vmw_fence_obj *fence; | ||
897 | |||
898 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
899 | &fence, NULL); | ||
900 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
901 | if (likely(fence != NULL)) | ||
902 | vmw_fence_obj_unreference(&fence); | ||
903 | ttm_bo_unref(&val_buf.bo); | ||
904 | ttm_bo_unref(&srf->backup); | ||
905 | } | ||
906 | |||
907 | /* | ||
908 | * Surface memory usage accounting. | ||
909 | */ | ||
910 | |||
911 | dev_priv->used_memory_size += srf->backup_size; | ||
912 | |||
913 | return 0; | ||
914 | |||
915 | out_no_fifo: | ||
916 | vmw_resource_release_id(res); | ||
917 | out_no_id: | ||
918 | out_no_validate: | ||
919 | if (srf->backup) | ||
920 | ttm_eu_backoff_reservation(&val_list); | ||
921 | out_no_reserve: | ||
922 | if (srf->backup) | ||
923 | ttm_bo_unref(&val_buf.bo); | ||
924 | return ret; | ||
925 | } | ||
926 | |||
927 | /** | ||
928 | * vmw_surface_evict - Evict a hw surface. | ||
929 | * | ||
930 | * @dev_priv: Pointer to a device private struct. | ||
931 | * @srf: Pointer to a struct vmw_surface | ||
932 | * | ||
933 | * DMA the contents of a hw surface to a backup guest buffer object, | ||
934 | * and destroy the hw surface, releasing its id. | ||
935 | */ | ||
936 | int vmw_surface_evict(struct vmw_private *dev_priv, | ||
937 | struct vmw_surface *srf) | ||
938 | { | ||
939 | struct vmw_resource *res = &srf->res; | ||
940 | struct list_head val_list; | ||
941 | struct ttm_validate_buffer val_buf; | ||
942 | uint32_t submit_size; | ||
943 | uint8_t *cmd; | ||
944 | int ret; | ||
945 | struct vmw_fence_obj *fence; | ||
946 | SVGAGuestPtr ptr; | ||
947 | |||
948 | BUG_ON(res->id == -1); | ||
949 | |||
950 | /* | ||
951 | * Create a surface backup buffer object. | ||
952 | */ | ||
953 | |||
954 | if (!srf->backup) { | ||
955 | ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, | ||
956 | ttm_bo_type_device, | ||
957 | &vmw_srf_placement, 0, 0, true, | ||
958 | NULL, &srf->backup); | ||
959 | if (unlikely(ret != 0)) | ||
960 | return ret; | ||
961 | } | ||
962 | |||
963 | /* | ||
964 | * Reserve- and validate the backup DMA bo. | ||
965 | */ | ||
966 | |||
967 | INIT_LIST_HEAD(&val_list); | ||
968 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
969 | val_buf.new_sync_obj_arg = (void *)(unsigned long) | ||
970 | DRM_VMW_FENCE_FLAG_EXEC; | ||
971 | list_add_tail(&val_buf.head, &val_list); | ||
972 | ret = ttm_eu_reserve_buffers(&val_list); | ||
973 | if (unlikely(ret != 0)) | ||
974 | goto out_no_reserve; | ||
975 | |||
976 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
977 | true, false, false); | ||
978 | if (unlikely(ret != 0)) | ||
979 | goto out_no_validate; | ||
980 | |||
981 | |||
982 | /* | ||
983 | * Encode the dma- and surface destroy commands. | ||
984 | */ | ||
985 | |||
986 | submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); | ||
987 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
988 | if (unlikely(cmd == NULL)) { | ||
989 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
990 | "eviction.\n"); | ||
991 | ret = -ENOMEM; | ||
992 | goto out_no_fifo; | ||
993 | } | ||
994 | |||
995 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
996 | vmw_surface_dma_encode(srf, cmd, &ptr, false); | ||
997 | cmd += vmw_surface_dma_size(srf); | ||
998 | vmw_surface_destroy_encode(res->id, cmd); | ||
999 | vmw_fifo_commit(dev_priv, submit_size); | ||
1000 | |||
1001 | /* | ||
1002 | * Surface memory usage accounting. | ||
1003 | */ | ||
1004 | |||
1005 | dev_priv->used_memory_size -= srf->backup_size; | ||
1006 | |||
1007 | /* | ||
1008 | * Create a fence object and fence the DMA buffer. | ||
1009 | */ | ||
1010 | |||
1011 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
1012 | &fence, NULL); | ||
1013 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
1014 | if (likely(fence != NULL)) | ||
1015 | vmw_fence_obj_unreference(&fence); | ||
1016 | ttm_bo_unref(&val_buf.bo); | ||
1017 | |||
1018 | /* | ||
1019 | * Release the surface ID. | ||
1020 | */ | ||
1021 | |||
1022 | vmw_resource_release_id(res); | ||
1023 | |||
1024 | return 0; | ||
1025 | |||
1026 | out_no_fifo: | ||
1027 | out_no_validate: | ||
1028 | if (srf->backup) | ||
1029 | ttm_eu_backoff_reservation(&val_list); | ||
1030 | out_no_reserve: | ||
1031 | ttm_bo_unref(&val_buf.bo); | ||
1032 | ttm_bo_unref(&srf->backup); | ||
1033 | return ret; | ||
1034 | } | ||
1035 | |||
1036 | |||
1037 | /** | ||
1038 | * vmw_surface_validate - make a surface available to the device, evicting | ||
1039 | * other surfaces if needed. | ||
1040 | * | ||
1041 | * @dev_priv: Pointer to a device private struct. | ||
1042 | * @srf: Pointer to a struct vmw_surface. | ||
1043 | * | ||
1044 | * Try to validate a surface and if it fails due to limited device resources, | ||
1045 | * repeatedly try to evict other surfaces until the request can be | ||
1046 | * acommodated. | ||
1047 | * | ||
1048 | * May return errors if out of resources. | ||
1049 | */ | ||
1050 | int vmw_surface_validate(struct vmw_private *dev_priv, | ||
1051 | struct vmw_surface *srf) | ||
1052 | { | ||
1053 | int ret; | ||
1054 | struct vmw_surface *evict_srf; | ||
1055 | |||
1056 | do { | ||
1057 | write_lock(&dev_priv->resource_lock); | ||
1058 | list_del_init(&srf->lru_head); | ||
1059 | write_unlock(&dev_priv->resource_lock); | ||
1060 | |||
1061 | ret = vmw_surface_do_validate(dev_priv, srf); | ||
1062 | if (likely(ret != -EBUSY)) | ||
1063 | break; | ||
1064 | |||
1065 | write_lock(&dev_priv->resource_lock); | ||
1066 | if (list_empty(&dev_priv->surface_lru)) { | ||
1067 | DRM_ERROR("Out of device memory for surfaces.\n"); | ||
1068 | ret = -EBUSY; | ||
1069 | write_unlock(&dev_priv->resource_lock); | ||
1070 | break; | ||
1071 | } | ||
1072 | |||
1073 | evict_srf = vmw_surface_reference | ||
1074 | (list_first_entry(&dev_priv->surface_lru, | ||
1075 | struct vmw_surface, | ||
1076 | lru_head)); | ||
1077 | list_del_init(&evict_srf->lru_head); | ||
1078 | |||
1079 | write_unlock(&dev_priv->resource_lock); | ||
1080 | (void) vmw_surface_evict(dev_priv, evict_srf); | ||
1081 | |||
1082 | vmw_surface_unreference(&evict_srf); | ||
1083 | |||
1084 | } while (1); | ||
1085 | |||
1086 | if (unlikely(ret != 0 && srf->res.id != -1)) { | ||
1087 | write_lock(&dev_priv->resource_lock); | ||
1088 | list_add_tail(&srf->lru_head, &dev_priv->surface_lru); | ||
1089 | write_unlock(&dev_priv->resource_lock); | ||
1090 | } | ||
1091 | |||
1092 | return ret; | ||
1093 | } | ||
1094 | |||
1095 | |||
1096 | /** | ||
1097 | * vmw_surface_remove_from_lists - Remove surface resources from lookup lists | ||
1098 | * | ||
1099 | * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface | ||
1100 | * | ||
1101 | * As part of the resource destruction, remove the surface from any | ||
1102 | * lookup lists. | ||
1103 | */ | ||
1104 | static void vmw_surface_remove_from_lists(struct vmw_resource *res) | ||
1105 | { | ||
1106 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
1107 | |||
1108 | list_del_init(&srf->lru_head); | ||
1109 | } | ||
1110 | |||
1111 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
1112 | struct vmw_surface *srf, | ||
1113 | void (*res_free) (struct vmw_resource *res)) | ||
1114 | { | ||
1115 | int ret; | ||
1116 | struct vmw_resource *res = &srf->res; | ||
1117 | |||
1118 | BUG_ON(res_free == NULL); | ||
1119 | INIT_LIST_HEAD(&srf->lru_head); | ||
1120 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | ||
1121 | VMW_RES_SURFACE, true, res_free, | ||
1122 | vmw_surface_remove_from_lists); | ||
1123 | |||
1124 | if (unlikely(ret != 0)) | ||
1125 | res_free(res); | ||
1126 | |||
1127 | /* | ||
1128 | * The surface won't be visible to hardware until a | ||
1129 | * surface validate. | ||
1130 | */ | ||
1131 | |||
1132 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
1133 | vmw_resource_activate(res, vmw_hw_surface_destroy); | ||
1134 | return ret; | ||
1135 | } | ||
1136 | |||
1137 | static void vmw_user_surface_free(struct vmw_resource *res) | ||
1138 | { | ||
1139 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
1140 | struct vmw_user_surface *user_srf = | ||
1141 | container_of(srf, struct vmw_user_surface, srf); | ||
1142 | struct vmw_private *dev_priv = srf->res.dev_priv; | ||
1143 | uint32_t size = user_srf->size; | ||
1144 | |||
1145 | if (srf->backup) | ||
1146 | ttm_bo_unref(&srf->backup); | ||
1147 | kfree(srf->offsets); | ||
1148 | kfree(srf->sizes); | ||
1149 | kfree(srf->snooper.image); | ||
1150 | kfree(user_srf); | ||
1151 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1152 | } | ||
1153 | |||
1154 | /** | ||
1155 | * vmw_resource_unreserve - unreserve resources previously reserved for | ||
1156 | * command submission. | ||
1157 | * | ||
1158 | * @list_head: list of resources to unreserve. | ||
1159 | * | ||
1160 | * Currently only surfaces are considered, and unreserving a surface | ||
1161 | * means putting it back on the device's surface lru list, | ||
1162 | * so that it can be evicted if necessary. | ||
1163 | * This function traverses the resource list and | ||
1164 | * checks whether resources are surfaces, and in that case puts them back | ||
1165 | * on the device's surface LRU list. | ||
1166 | */ | ||
1167 | void vmw_resource_unreserve(struct list_head *list) | ||
1168 | { | ||
1169 | struct vmw_resource *res; | ||
1170 | struct vmw_surface *srf; | ||
1171 | rwlock_t *lock = NULL; | ||
1172 | |||
1173 | list_for_each_entry(res, list, validate_head) { | ||
1174 | |||
1175 | if (res->res_free != &vmw_surface_res_free && | ||
1176 | res->res_free != &vmw_user_surface_free) | ||
1177 | continue; | ||
1178 | |||
1179 | if (unlikely(lock == NULL)) { | ||
1180 | lock = &res->dev_priv->resource_lock; | ||
1181 | write_lock(lock); | ||
1182 | } | ||
1183 | |||
1184 | srf = container_of(res, struct vmw_surface, res); | ||
1185 | list_del_init(&srf->lru_head); | ||
1186 | list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); | ||
1187 | } | ||
1188 | |||
1189 | if (lock != NULL) | ||
1190 | write_unlock(lock); | ||
1191 | } | ||
1192 | |||
1193 | /** | ||
1194 | * Helper function that looks either a surface or dmabuf. | ||
1195 | * | ||
1196 | * The pointer this pointed at by out_surf and out_buf needs to be null. | ||
1197 | */ | ||
1198 | int vmw_user_lookup_handle(struct vmw_private *dev_priv, | ||
1199 | struct ttm_object_file *tfile, | ||
1200 | uint32_t handle, | ||
1201 | struct vmw_surface **out_surf, | ||
1202 | struct vmw_dma_buffer **out_buf) | ||
1203 | { | ||
1204 | int ret; | ||
1205 | |||
1206 | BUG_ON(*out_surf || *out_buf); | ||
1207 | |||
1208 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf); | ||
1209 | if (!ret) | ||
1210 | return 0; | ||
1211 | |||
1212 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); | ||
1213 | return ret; | ||
1214 | } | ||
1215 | |||
1216 | |||
1217 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | ||
1218 | struct ttm_object_file *tfile, | ||
1219 | uint32_t handle, struct vmw_surface **out) | ||
1220 | { | ||
1221 | struct vmw_resource *res; | ||
1222 | struct vmw_surface *srf; | ||
1223 | struct vmw_user_surface *user_srf; | ||
1224 | struct ttm_base_object *base; | 291 | struct ttm_base_object *base; |
292 | struct vmw_resource *res; | ||
1225 | int ret = -EINVAL; | 293 | int ret = -EINVAL; |
1226 | 294 | ||
1227 | base = ttm_base_object_lookup(tfile, handle); | 295 | base = ttm_base_object_lookup(tfile, handle); |
1228 | if (unlikely(base == NULL)) | 296 | if (unlikely(base == NULL)) |
1229 | return -EINVAL; | 297 | return -EINVAL; |
1230 | 298 | ||
1231 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | 299 | if (unlikely(base->object_type != converter->object_type)) |
1232 | goto out_bad_resource; | 300 | goto out_bad_resource; |
1233 | 301 | ||
1234 | user_srf = container_of(base, struct vmw_user_surface, base); | 302 | res = converter->base_obj_to_res(base); |
1235 | srf = &user_srf->srf; | ||
1236 | res = &srf->res; | ||
1237 | 303 | ||
1238 | read_lock(&dev_priv->resource_lock); | 304 | read_lock(&dev_priv->resource_lock); |
1239 | 305 | if (!res->avail || res->res_free != converter->res_free) { | |
1240 | if (!res->avail || res->res_free != &vmw_user_surface_free) { | ||
1241 | read_unlock(&dev_priv->resource_lock); | 306 | read_unlock(&dev_priv->resource_lock); |
1242 | goto out_bad_resource; | 307 | goto out_bad_resource; |
1243 | } | 308 | } |
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1245 | kref_get(&res->kref); | 310 | kref_get(&res->kref); |
1246 | read_unlock(&dev_priv->resource_lock); | 311 | read_unlock(&dev_priv->resource_lock); |
1247 | 312 | ||
1248 | *out = srf; | 313 | *p_res = res; |
1249 | ret = 0; | 314 | ret = 0; |
1250 | 315 | ||
1251 | out_bad_resource: | 316 | out_bad_resource: |
@@ -1254,286 +319,32 @@ out_bad_resource:
1254 | return ret; | 319 | return ret; |
1255 | } | 320 | } |
1256 | 321 | ||
1257 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | 322 | /** |
1258 | { | 323 | * Helper function that looks either a surface or dmabuf. |
1259 | struct ttm_base_object *base = *p_base; | 324 | * |
1260 | struct vmw_user_surface *user_srf = | 325 | * The pointer this pointed at by out_surf and out_buf needs to be null. |
1261 | container_of(base, struct vmw_user_surface, base); | 326 | */ |
1262 | struct vmw_resource *res = &user_srf->srf.res; | 327 | int vmw_user_lookup_handle(struct vmw_private *dev_priv, |
1263 | 328 | struct ttm_object_file *tfile, | |
1264 | *p_base = NULL; | 329 | uint32_t handle, |
1265 | vmw_resource_unreference(&res); | 330 | struct vmw_surface **out_surf, |
1266 | } | 331 | struct vmw_dma_buffer **out_buf) |
1267 | |||
1268 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
1269 | struct drm_file *file_priv) | ||
1270 | { | ||
1271 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | ||
1272 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1273 | |||
1274 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); | ||
1275 | } | ||
1276 | |||
1277 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
1278 | struct drm_file *file_priv) | ||
1279 | { | 332 | { |
1280 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1281 | struct vmw_user_surface *user_srf; | ||
1282 | struct vmw_surface *srf; | ||
1283 | struct vmw_resource *res; | 333 | struct vmw_resource *res; |
1284 | struct vmw_resource *tmp; | ||
1285 | union drm_vmw_surface_create_arg *arg = | ||
1286 | (union drm_vmw_surface_create_arg *)data; | ||
1287 | struct drm_vmw_surface_create_req *req = &arg->req; | ||
1288 | struct drm_vmw_surface_arg *rep = &arg->rep; | ||
1289 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1290 | struct drm_vmw_size __user *user_sizes; | ||
1291 | int ret; | 334 | int ret; |
1292 | int i, j; | ||
1293 | uint32_t cur_bo_offset; | ||
1294 | struct drm_vmw_size *cur_size; | ||
1295 | struct vmw_surface_offset *cur_offset; | ||
1296 | uint32_t stride_bpp; | ||
1297 | uint32_t bpp; | ||
1298 | uint32_t num_sizes; | ||
1299 | uint32_t size; | ||
1300 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1301 | 335 | ||
1302 | if (unlikely(vmw_user_surface_size == 0)) | 336 | BUG_ON(*out_surf || *out_buf); |
1303 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
1304 | 128; | ||
1305 | |||
1306 | num_sizes = 0; | ||
1307 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
1308 | num_sizes += req->mip_levels[i]; | ||
1309 | |||
1310 | if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * | ||
1311 | DRM_VMW_MAX_MIP_LEVELS) | ||
1312 | return -EINVAL; | ||
1313 | |||
1314 | size = vmw_user_surface_size + 128 + | ||
1315 | ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + | ||
1316 | ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); | ||
1317 | |||
1318 | |||
1319 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1320 | if (unlikely(ret != 0)) | ||
1321 | return ret; | ||
1322 | |||
1323 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
1324 | size, false, true); | ||
1325 | if (unlikely(ret != 0)) { | ||
1326 | if (ret != -ERESTARTSYS) | ||
1327 | DRM_ERROR("Out of graphics memory for surface" | ||
1328 | " creation.\n"); | ||
1329 | goto out_unlock; | ||
1330 | } | ||
1331 | |||
1332 | user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL); | ||
1333 | if (unlikely(user_srf == NULL)) { | ||
1334 | ret = -ENOMEM; | ||
1335 | goto out_no_user_srf; | ||
1336 | } | ||
1337 | |||
1338 | srf = &user_srf->srf; | ||
1339 | res = &srf->res; | ||
1340 | |||
1341 | srf->flags = req->flags; | ||
1342 | srf->format = req->format; | ||
1343 | srf->scanout = req->scanout; | ||
1344 | srf->backup = NULL; | ||
1345 | |||
1346 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | ||
1347 | srf->num_sizes = num_sizes; | ||
1348 | user_srf->size = size; | ||
1349 | |||
1350 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | ||
1351 | if (unlikely(srf->sizes == NULL)) { | ||
1352 | ret = -ENOMEM; | ||
1353 | goto out_no_sizes; | ||
1354 | } | ||
1355 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | ||
1356 | GFP_KERNEL); | ||
1357 | if (unlikely(srf->sizes == NULL)) { | ||
1358 | ret = -ENOMEM; | ||
1359 | goto out_no_offsets; | ||
1360 | } | ||
1361 | |||
1362 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
1363 | req->size_addr; | ||
1364 | |||
1365 | ret = copy_from_user(srf->sizes, user_sizes, | ||
1366 | srf->num_sizes * sizeof(*srf->sizes)); | ||
1367 | if (unlikely(ret != 0)) { | ||
1368 | ret = -EFAULT; | ||
1369 | goto out_no_copy; | ||
1370 | } | ||
1371 | |||
1372 | cur_bo_offset = 0; | ||
1373 | cur_offset = srf->offsets; | ||
1374 | cur_size = srf->sizes; | ||
1375 | |||
1376 | bpp = vmw_sf_bpp[srf->format].bpp; | ||
1377 | stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
1378 | |||
1379 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
1380 | for (j = 0; j < srf->mip_levels[i]; ++j) { | ||
1381 | uint32_t stride = | ||
1382 | (cur_size->width * stride_bpp + 7) >> 3; | ||
1383 | |||
1384 | cur_offset->face = i; | ||
1385 | cur_offset->mip = j; | ||
1386 | cur_offset->bo_offset = cur_bo_offset; | ||
1387 | cur_bo_offset += stride * cur_size->height * | ||
1388 | cur_size->depth * bpp / stride_bpp; | ||
1389 | ++cur_offset; | ||
1390 | ++cur_size; | ||
1391 | } | ||
1392 | } | ||
1393 | srf->backup_size = cur_bo_offset; | ||
1394 | |||
1395 | if (srf->scanout && | ||
1396 | srf->num_sizes == 1 && | ||
1397 | srf->sizes[0].width == 64 && | ||
1398 | srf->sizes[0].height == 64 && | ||
1399 | srf->format == SVGA3D_A8R8G8B8) { | ||
1400 | |||
1401 | /* allocate image area and clear it */ | ||
1402 | srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); | ||
1403 | if (!srf->snooper.image) { | ||
1404 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
1405 | ret = -ENOMEM; | ||
1406 | goto out_no_copy; | ||
1407 | } | ||
1408 | } else { | ||
1409 | srf->snooper.image = NULL; | ||
1410 | } | ||
1411 | srf->snooper.crtc = NULL; | ||
1412 | |||
1413 | user_srf->base.shareable = false; | ||
1414 | user_srf->base.tfile = NULL; | ||
1415 | |||
1416 | /** | ||
1417 | * From this point, the generic resource management functions | ||
1418 | * destroy the object on failure. | ||
1419 | */ | ||
1420 | |||
1421 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
1422 | if (unlikely(ret != 0)) | ||
1423 | goto out_unlock; | ||
1424 | |||
1425 | tmp = vmw_resource_reference(&srf->res); | ||
1426 | ret = ttm_base_object_init(tfile, &user_srf->base, | ||
1427 | req->shareable, VMW_RES_SURFACE, | ||
1428 | &vmw_user_surface_base_release, NULL); | ||
1429 | |||
1430 | if (unlikely(ret != 0)) { | ||
1431 | vmw_resource_unreference(&tmp); | ||
1432 | vmw_resource_unreference(&res); | ||
1433 | goto out_unlock; | ||
1434 | } | ||
1435 | |||
1436 | rep->sid = user_srf->base.hash.key; | ||
1437 | if (rep->sid == SVGA3D_INVALID_ID) | ||
1438 | DRM_ERROR("Created bad Surface ID.\n"); | ||
1439 | |||
1440 | vmw_resource_unreference(&res); | ||
1441 | |||
1442 | ttm_read_unlock(&vmaster->lock); | ||
1443 | return 0; | ||
1444 | out_no_copy: | ||
1445 | kfree(srf->offsets); | ||
1446 | out_no_offsets: | ||
1447 | kfree(srf->sizes); | ||
1448 | out_no_sizes: | ||
1449 | kfree(user_srf); | ||
1450 | out_no_user_srf: | ||
1451 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1452 | out_unlock: | ||
1453 | ttm_read_unlock(&vmaster->lock); | ||
1454 | return ret; | ||
1455 | } | ||
1456 | |||
1457 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
1458 | struct drm_file *file_priv) | ||
1459 | { | ||
1460 | union drm_vmw_surface_reference_arg *arg = | ||
1461 | (union drm_vmw_surface_reference_arg *)data; | ||
1462 | struct drm_vmw_surface_arg *req = &arg->req; | ||
1463 | struct drm_vmw_surface_create_req *rep = &arg->rep; | ||
1464 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1465 | struct vmw_surface *srf; | ||
1466 | struct vmw_user_surface *user_srf; | ||
1467 | struct drm_vmw_size __user *user_sizes; | ||
1468 | struct ttm_base_object *base; | ||
1469 | int ret = -EINVAL; | ||
1470 | |||
1471 | base = ttm_base_object_lookup(tfile, req->sid); | ||
1472 | if (unlikely(base == NULL)) { | ||
1473 | DRM_ERROR("Could not find surface to reference.\n"); | ||
1474 | return -EINVAL; | ||
1475 | } | ||
1476 | |||
1477 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
1478 | goto out_bad_resource; | ||
1479 | |||
1480 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
1481 | srf = &user_srf->srf; | ||
1482 | |||
1483 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | ||
1484 | if (unlikely(ret != 0)) { | ||
1485 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
1486 | goto out_no_reference; | ||
1487 | } | ||
1488 | |||
1489 | rep->flags = srf->flags; | ||
1490 | rep->format = srf->format; | ||
1491 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | ||
1492 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
1493 | rep->size_addr; | ||
1494 | 337 | ||
1495 | if (user_sizes) | 338 | ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, |
1496 | ret = copy_to_user(user_sizes, srf->sizes, | 339 | user_surface_converter, |
1497 | srf->num_sizes * sizeof(*srf->sizes)); | 340 | &res); |
1498 | if (unlikely(ret != 0)) { | 341 | if (!ret) { |
1499 | DRM_ERROR("copy_to_user failed %p %u\n", | 342 | *out_surf = vmw_res_to_srf(res); |
1500 | user_sizes, srf->num_sizes); | 343 | return 0; |
1501 | ret = -EFAULT; | ||
1502 | } | 344 | } |
1503 | out_bad_resource: | ||
1504 | out_no_reference: | ||
1505 | ttm_base_object_unref(&base); | ||
1506 | |||
1507 | return ret; | ||
1508 | } | ||
1509 | |||
1510 | int vmw_surface_check(struct vmw_private *dev_priv, | ||
1511 | struct ttm_object_file *tfile, | ||
1512 | uint32_t handle, int *id) | ||
1513 | { | ||
1514 | struct ttm_base_object *base; | ||
1515 | struct vmw_user_surface *user_srf; | ||
1516 | |||
1517 | int ret = -EPERM; | ||
1518 | |||
1519 | base = ttm_base_object_lookup(tfile, handle); | ||
1520 | if (unlikely(base == NULL)) | ||
1521 | return -EINVAL; | ||
1522 | 345 | ||
1523 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | 346 | *out_surf = NULL; |
1524 | goto out_bad_surface; | 347 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); |
1525 | |||
1526 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
1527 | *id = user_srf->srf.res.id; | ||
1528 | ret = 0; | ||
1529 | |||
1530 | out_bad_surface: | ||
1531 | /** | ||
1532 | * FIXME: May deadlock here when called from the | ||
1533 | * command parsing code. | ||
1534 | */ | ||
1535 | |||
1536 | ttm_base_object_unref(&base); | ||
1537 | return ret; | 348 | return ret; |
1538 | } | 349 | } |
1539 | 350 | ||
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
1562 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); | 373 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); |
1563 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | 374 | memset(vmw_bo, 0, sizeof(*vmw_bo)); |
1564 | 375 | ||
1565 | INIT_LIST_HEAD(&vmw_bo->validate_list); | 376 | INIT_LIST_HEAD(&vmw_bo->res_list); |
1566 | 377 | ||
1567 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 378 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
1568 | ttm_bo_type_device, placement, | 379 | ttm_bo_type_device, placement, |
1569 | 0, 0, interruptible, | 380 | 0, interruptible, |
1570 | NULL, acc_size, NULL, bo_free); | 381 | NULL, acc_size, NULL, bo_free); |
1571 | return ret; | 382 | return ret; |
1572 | } | 383 | } |
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | |||
1575 | { | 386 | { |
1576 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | 387 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
1577 | 388 | ||
1578 | kfree(vmw_user_bo); | 389 | ttm_base_object_kfree(vmw_user_bo, base); |
1579 | } | 390 | } |
1580 | 391 | ||
1581 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | 392 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) |
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | |||
1594 | ttm_bo_unref(&bo); | 405 | ttm_bo_unref(&bo); |
1595 | } | 406 | } |
1596 | 407 | ||
408 | /** | ||
409 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer | ||
410 | * | ||
411 | * @dev_priv: Pointer to a struct device private. | ||
412 | * @tfile: Pointer to a struct ttm_object_file on which to register the user | ||
413 | * object. | ||
414 | * @size: Size of the dma buffer. | ||
415 | * @shareable: Boolean whether the buffer is shareable with other open files. | ||
416 | * @handle: Pointer to where the handle value should be assigned. | ||
417 | * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer | ||
418 | * should be assigned. | ||
419 | */ | ||
420 | int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
421 | struct ttm_object_file *tfile, | ||
422 | uint32_t size, | ||
423 | bool shareable, | ||
424 | uint32_t *handle, | ||
425 | struct vmw_dma_buffer **p_dma_buf) | ||
426 | { | ||
427 | struct vmw_user_dma_buffer *user_bo; | ||
428 | struct ttm_buffer_object *tmp; | ||
429 | int ret; | ||
430 | |||
431 | user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); | ||
432 | if (unlikely(user_bo == NULL)) { | ||
433 | DRM_ERROR("Failed to allocate a buffer.\n"); | ||
434 | return -ENOMEM; | ||
435 | } | ||
436 | |||
437 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, | ||
438 | &vmw_vram_sys_placement, true, | ||
439 | &vmw_user_dmabuf_destroy); | ||
440 | if (unlikely(ret != 0)) | ||
441 | return ret; | ||
442 | |||
443 | tmp = ttm_bo_reference(&user_bo->dma.base); | ||
444 | ret = ttm_base_object_init(tfile, | ||
445 | &user_bo->base, | ||
446 | shareable, | ||
447 | ttm_buffer_type, | ||
448 | &vmw_user_dmabuf_release, NULL); | ||
449 | if (unlikely(ret != 0)) { | ||
450 | ttm_bo_unref(&tmp); | ||
451 | goto out_no_base_object; | ||
452 | } | ||
453 | |||
454 | *p_dma_buf = &user_bo->dma; | ||
455 | *handle = user_bo->base.hash.key; | ||
456 | |||
457 | out_no_base_object: | ||
458 | return ret; | ||
459 | } | ||
460 | |||
461 | /** | ||
462 | * vmw_user_dmabuf_verify_access - verify access permissions on this | ||
463 | * buffer object. | ||
464 | * | ||
465 | * @bo: Pointer to the buffer object being accessed | ||
466 | * @tfile: Identifying the caller. | ||
467 | */ | ||
468 | int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | ||
469 | struct ttm_object_file *tfile) | ||
470 | { | ||
471 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
472 | |||
473 | if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) | ||
474 | return -EPERM; | ||
475 | |||
476 | vmw_user_bo = vmw_user_dma_buffer(bo); | ||
477 | return (vmw_user_bo->base.tfile == tfile || | ||
478 | vmw_user_bo->base.shareable) ? 0 : -EPERM; | ||
479 | } | ||
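For context only, a minimal sketch of how a TTM verify_access hook could delegate to the helper above. The hook name, its (bo, filp) signature for this kernel generation, and the use of filp->private_data as the DRM file are assumptions, not part of this patch:

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical ttm_bo_driver::verify_access implementation (sketch). */
static int example_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        struct drm_file *file_priv = filp->private_data;

        /* Delegate the permission check to vmw_user_dmabuf_verify_access. */
        return vmw_user_dmabuf_verify_access(bo,
                                             vmw_fpriv(file_priv)->tfile);
}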
480 | |||
1597 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 481 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
1598 | struct drm_file *file_priv) | 482 | struct drm_file *file_priv) |
1599 | { | 483 | { |
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
1602 | (union drm_vmw_alloc_dmabuf_arg *)data; | 486 | (union drm_vmw_alloc_dmabuf_arg *)data; |
1603 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; | 487 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; |
1604 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; | 488 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; |
1605 | struct vmw_user_dma_buffer *vmw_user_bo; | 489 | struct vmw_dma_buffer *dma_buf; |
1606 | struct ttm_buffer_object *tmp; | 490 | uint32_t handle; |
1607 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 491 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
1608 | int ret; | 492 | int ret; |
1609 | 493 | ||
1610 | vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); | ||
1611 | if (unlikely(vmw_user_bo == NULL)) | ||
1612 | return -ENOMEM; | ||
1613 | |||
1614 | ret = ttm_read_lock(&vmaster->lock, true); | 494 | ret = ttm_read_lock(&vmaster->lock, true); |
1615 | if (unlikely(ret != 0)) { | 495 | if (unlikely(ret != 0)) |
1616 | kfree(vmw_user_bo); | ||
1617 | return ret; | 496 | return ret; |
1618 | } | ||
1619 | 497 | ||
1620 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | 498 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
1621 | &vmw_vram_sys_placement, true, | 499 | req->size, false, &handle, &dma_buf); |
1622 | &vmw_user_dmabuf_destroy); | ||
1623 | if (unlikely(ret != 0)) | 500 | if (unlikely(ret != 0)) |
1624 | goto out_no_dmabuf; | 501 | goto out_no_dmabuf; |
1625 | 502 | ||
1626 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | 503 | rep->handle = handle; |
1627 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | 504 | rep->map_handle = dma_buf->base.addr_space_offset; |
1628 | &vmw_user_bo->base, | 505 | rep->cur_gmr_id = handle; |
1629 | false, | 506 | rep->cur_gmr_offset = 0; |
1630 | ttm_buffer_type, | 507 | |
1631 | &vmw_user_dmabuf_release, NULL); | 508 | vmw_dmabuf_unreference(&dma_buf); |
1632 | if (unlikely(ret != 0)) | ||
1633 | goto out_no_base_object; | ||
1634 | else { | ||
1635 | rep->handle = vmw_user_bo->base.hash.key; | ||
1636 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | ||
1637 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | ||
1638 | rep->cur_gmr_offset = 0; | ||
1639 | } | ||
1640 | 509 | ||
1641 | out_no_base_object: | ||
1642 | ttm_bo_unref(&tmp); | ||
1643 | out_no_dmabuf: | 510 | out_no_dmabuf: |
1644 | ttm_read_unlock(&vmaster->lock); | 511 | ttm_read_unlock(&vmaster->lock); |
1645 | 512 | ||
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | |||
1657 | TTM_REF_USAGE); | 524 | TTM_REF_USAGE); |
1658 | } | 525 | } |
1659 | 526 | ||
1660 | uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | ||
1661 | uint32_t cur_validate_node) | ||
1662 | { | ||
1663 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
1664 | |||
1665 | if (likely(vmw_bo->on_validate_list)) | ||
1666 | return vmw_bo->cur_validate_node; | ||
1667 | |||
1668 | vmw_bo->cur_validate_node = cur_validate_node; | ||
1669 | vmw_bo->on_validate_list = true; | ||
1670 | |||
1671 | return cur_validate_node; | ||
1672 | } | ||
1673 | |||
1674 | void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) | ||
1675 | { | ||
1676 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
1677 | |||
1678 | vmw_bo->on_validate_list = false; | ||
1679 | } | ||
1680 | |||
1681 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 527 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
1682 | uint32_t handle, struct vmw_dma_buffer **out) | 528 | uint32_t handle, struct vmw_dma_buffer **out) |
1683 | { | 529 | { |
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
1706 | return 0; | 552 | return 0; |
1707 | } | 553 | } |
1708 | 554 | ||
555 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
556 | struct vmw_dma_buffer *dma_buf) | ||
557 | { | ||
558 | struct vmw_user_dma_buffer *user_bo; | ||
559 | |||
560 | if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) | ||
561 | return -EINVAL; | ||
562 | |||
563 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | ||
564 | return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); | ||
565 | } | ||
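A minimal caller sketch (hypothetical function name, not part of this patch) showing how the lookup and reference helpers combine: resolve a user-space handle to a vmw_dma_buffer and pin the handle for the calling file. The driver header declarations are assumed to be visible:

/*
 * Sketch: translate a user-space handle into a vmw_dma_buffer and add
 * a TTM usage reference for the calling file, so the handle stays
 * valid while the buffer is in use.
 */
static int example_pin_user_dmabuf(struct ttm_object_file *tfile,
                                   uint32_t handle,
                                   struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *dma_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf);
        if (unlikely(ret != 0))
                return ret;

        /* Only user-created buffers may be referenced this way. */
        ret = vmw_user_dmabuf_reference(tfile, dma_buf);
        if (unlikely(ret != 0)) {
                vmw_dmabuf_unreference(&dma_buf);
                return ret;
        }

        *out = dma_buf;
        return 0;
}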
566 | |||
1709 | /* | 567 | /* |
1710 | * Stream management | 568 | * Stream management |
1711 | */ | 569 | */ |
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
1730 | struct vmw_resource *res = &stream->res; | 588 | struct vmw_resource *res = &stream->res; |
1731 | int ret; | 589 | int ret; |
1732 | 590 | ||
1733 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | 591 | ret = vmw_resource_init(dev_priv, res, false, res_free, |
1734 | VMW_RES_STREAM, false, res_free, NULL); | 592 | &vmw_stream_func); |
1735 | 593 | ||
1736 | if (unlikely(ret != 0)) { | 594 | if (unlikely(ret != 0)) { |
1737 | if (res_free == NULL) | 595 | if (res_free == NULL) |
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
1753 | return 0; | 611 | return 0; |
1754 | } | 612 | } |
1755 | 613 | ||
1756 | /** | ||
1757 | * User-space context management: | ||
1758 | */ | ||
1759 | |||
1760 | static void vmw_user_stream_free(struct vmw_resource *res) | 614 | static void vmw_user_stream_free(struct vmw_resource *res) |
1761 | { | 615 | { |
1762 | struct vmw_user_stream *stream = | 616 | struct vmw_user_stream *stream = |
1763 | container_of(res, struct vmw_user_stream, stream.res); | 617 | container_of(res, struct vmw_user_stream, stream.res); |
1764 | struct vmw_private *dev_priv = res->dev_priv; | 618 | struct vmw_private *dev_priv = res->dev_priv; |
1765 | 619 | ||
1766 | kfree(stream); | 620 | ttm_base_object_kfree(stream, base); |
1767 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 621 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
1768 | vmw_user_stream_size); | 622 | vmw_user_stream_size); |
1769 | } | 623 | } |
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | |||
1792 | struct vmw_user_stream *stream; | 646 | struct vmw_user_stream *stream; |
1793 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | 647 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; |
1794 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 648 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
649 | struct idr *idr = &dev_priv->res_idr[vmw_res_stream]; | ||
1795 | int ret = 0; | 650 | int ret = 0; |
1796 | 651 | ||
1797 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); | 652 | |
653 | res = vmw_resource_lookup(dev_priv, idr, arg->stream_id); | ||
1798 | if (unlikely(res == NULL)) | 654 | if (unlikely(res == NULL)) |
1799 | return -EINVAL; | 655 | return -EINVAL; |
1800 | 656 | ||
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
1895 | struct vmw_resource *res; | 751 | struct vmw_resource *res; |
1896 | int ret; | 752 | int ret; |
1897 | 753 | ||
1898 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); | 754 | res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream], |
755 | *inout_id); | ||
1899 | if (unlikely(res == NULL)) | 756 | if (unlikely(res == NULL)) |
1900 | return -EINVAL; | 757 | return -EINVAL; |
1901 | 758 | ||
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv, | |||
1990 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | 847 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, |
1991 | handle, TTM_REF_USAGE); | 848 | handle, TTM_REF_USAGE); |
1992 | } | 849 | } |
850 | |||
851 | /** | ||
852 | * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. | ||
853 | * | ||
854 | * @res: The resource for which to allocate a backup buffer. | ||
855 | * @interruptible: Whether any sleeps during allocation should be | ||
856 | * performed while interruptible. | ||
857 | */ | ||
858 | static int vmw_resource_buf_alloc(struct vmw_resource *res, | ||
859 | bool interruptible) | ||
860 | { | ||
861 | unsigned long size = | ||
862 | (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
863 | struct vmw_dma_buffer *backup; | ||
864 | int ret; | ||
865 | |||
866 | if (likely(res->backup)) { | ||
867 | BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); | ||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | backup = kzalloc(sizeof(*backup), GFP_KERNEL); | ||
872 | if (unlikely(backup == NULL)) | ||
873 | return -ENOMEM; | ||
874 | |||
875 | ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, | ||
876 | res->func->backup_placement, | ||
877 | interruptible, | ||
878 | &vmw_dmabuf_bo_free); | ||
879 | if (unlikely(ret != 0)) | ||
880 | goto out_no_dmabuf; | ||
881 | |||
882 | res->backup = backup; | ||
883 | |||
884 | out_no_dmabuf: | ||
885 | return ret; | ||
886 | } | ||
887 | |||
888 | /** | ||
889 | * vmw_resource_do_validate - Make a resource up-to-date and visible | ||
890 | * to the device. | ||
891 | * | ||
892 | * @res: The resource to make visible to the device. | ||
893 | * @val_buf: Information about a buffer possibly | ||
894 | * containing backup data if a bind operation is needed. | ||
895 | * | ||
896 | * On hardware resource shortage, this function returns -EBUSY and | ||
897 | * should be retried once resources have been freed up. | ||
898 | */ | ||
899 | static int vmw_resource_do_validate(struct vmw_resource *res, | ||
900 | struct ttm_validate_buffer *val_buf) | ||
901 | { | ||
902 | int ret = 0; | ||
903 | const struct vmw_res_func *func = res->func; | ||
904 | |||
905 | if (unlikely(res->id == -1)) { | ||
906 | ret = func->create(res); | ||
907 | if (unlikely(ret != 0)) | ||
908 | return ret; | ||
909 | } | ||
910 | |||
911 | if (func->bind && | ||
912 | ((func->needs_backup && list_empty(&res->mob_head) && | ||
913 | val_buf->bo != NULL) || | ||
914 | (!func->needs_backup && val_buf->bo != NULL))) { | ||
915 | ret = func->bind(res, val_buf); | ||
916 | if (unlikely(ret != 0)) | ||
917 | goto out_bind_failed; | ||
918 | if (func->needs_backup) | ||
919 | list_add_tail(&res->mob_head, &res->backup->res_list); | ||
920 | } | ||
921 | |||
922 | /* | ||
923 | * Only do this on write operations, and move to | ||
924 | * vmw_resource_unreserve if it can be called after | ||
925 | * backup buffers have been unreserved. Otherwise | ||
926 | * sort out locking. | ||
927 | */ | ||
928 | res->res_dirty = true; | ||
929 | |||
930 | return 0; | ||
931 | |||
932 | out_bind_failed: | ||
933 | func->destroy(res); | ||
934 | |||
935 | return ret; | ||
936 | } | ||
937 | |||
938 | /** | ||
939 | * vmw_resource_unreserve - Unreserve a resource previously reserved for | ||
940 | * command submission. | ||
941 | * | ||
942 | * @res: Pointer to the struct vmw_resource to unreserve. | ||
943 | * @new_backup: Pointer to new backup buffer if command submission | ||
944 | * switched. | ||
945 | * @new_backup_offset: New backup offset if @new_backup is !NULL. | ||
946 | * | ||
947 | * Currently unreserving a resource means putting it back on the device's | ||
948 | * resource lru list, so that it can be evicted if necessary. | ||
949 | */ | ||
950 | void vmw_resource_unreserve(struct vmw_resource *res, | ||
951 | struct vmw_dma_buffer *new_backup, | ||
952 | unsigned long new_backup_offset) | ||
953 | { | ||
954 | struct vmw_private *dev_priv = res->dev_priv; | ||
955 | |||
956 | if (!list_empty(&res->lru_head)) | ||
957 | return; | ||
958 | |||
959 | if (new_backup && new_backup != res->backup) { | ||
960 | |||
961 | if (res->backup) { | ||
962 | BUG_ON(atomic_read(&res->backup->base.reserved) == 0); | ||
963 | list_del_init(&res->mob_head); | ||
964 | vmw_dmabuf_unreference(&res->backup); | ||
965 | } | ||
966 | |||
967 | res->backup = vmw_dmabuf_reference(new_backup); | ||
968 | BUG_ON(atomic_read(&new_backup->base.reserved) == 0); | ||
969 | list_add_tail(&res->mob_head, &new_backup->res_list); | ||
970 | } | ||
971 | if (new_backup) | ||
972 | res->backup_offset = new_backup_offset; | ||
973 | |||
974 | if (!res->func->may_evict) | ||
975 | return; | ||
976 | |||
977 | write_lock(&dev_priv->resource_lock); | ||
978 | list_add_tail(&res->lru_head, | ||
979 | &res->dev_priv->res_lru[res->func->res_type]); | ||
980 | write_unlock(&dev_priv->resource_lock); | ||
981 | } | ||
982 | |||
983 | /** | ||
984 | * vmw_resource_check_buffer - Check whether a backup buffer is needed | ||
985 | * for a resource and in that case, allocate | ||
986 | * one, reserve and validate it. | ||
987 | * | ||
988 | * @res: The resource for which to allocate a backup buffer. | ||
989 | * @interruptible: Whether any sleeps during allocation should be | ||
990 | * performed while interruptible. | ||
991 | * @val_buf: On successful return contains data about the | ||
992 | * reserved and validated backup buffer. | ||
993 | */ | ||
994 | int vmw_resource_check_buffer(struct vmw_resource *res, | ||
995 | bool interruptible, | ||
996 | struct ttm_validate_buffer *val_buf) | ||
997 | { | ||
998 | struct list_head val_list; | ||
999 | bool backup_dirty = false; | ||
1000 | int ret; | ||
1001 | |||
1002 | if (unlikely(res->backup == NULL)) { | ||
1003 | ret = vmw_resource_buf_alloc(res, interruptible); | ||
1004 | if (unlikely(ret != 0)) | ||
1005 | return ret; | ||
1006 | } | ||
1007 | |||
1008 | INIT_LIST_HEAD(&val_list); | ||
1009 | val_buf->bo = ttm_bo_reference(&res->backup->base); | ||
1010 | list_add_tail(&val_buf->head, &val_list); | ||
1011 | ret = ttm_eu_reserve_buffers(&val_list); | ||
1012 | if (unlikely(ret != 0)) | ||
1013 | goto out_no_reserve; | ||
1014 | |||
1015 | if (res->func->needs_backup && list_empty(&res->mob_head)) | ||
1016 | return 0; | ||
1017 | |||
1018 | backup_dirty = res->backup_dirty; | ||
1019 | ret = ttm_bo_validate(&res->backup->base, | ||
1020 | res->func->backup_placement, | ||
1021 | true, false); | ||
1022 | |||
1023 | if (unlikely(ret != 0)) | ||
1024 | goto out_no_validate; | ||
1025 | |||
1026 | return 0; | ||
1027 | |||
1028 | out_no_validate: | ||
1029 | ttm_eu_backoff_reservation(&val_list); | ||
1030 | out_no_reserve: | ||
1031 | ttm_bo_unref(&val_buf->bo); | ||
1032 | if (backup_dirty) | ||
1033 | vmw_dmabuf_unreference(&res->backup); | ||
1034 | |||
1035 | return ret; | ||
1036 | } | ||
1037 | |||
1038 | /** | ||
1039 | * vmw_resource_reserve - Reserve a resource for command submission | ||
1040 | * | ||
1041 | * @res: The resource to reserve. | ||
1042 | * | ||
1043 | * This function takes the resource off the LRU list and makes sure | ||
1044 | * a backup buffer is present for guest-backed resources. However, | ||
1045 | * the buffer may not be bound to the resource at this point. | ||
1046 | * | ||
1047 | */ | ||
1048 | int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) | ||
1049 | { | ||
1050 | struct vmw_private *dev_priv = res->dev_priv; | ||
1051 | int ret; | ||
1052 | |||
1053 | write_lock(&dev_priv->resource_lock); | ||
1054 | list_del_init(&res->lru_head); | ||
1055 | write_unlock(&dev_priv->resource_lock); | ||
1056 | |||
1057 | if (res->func->needs_backup && res->backup == NULL && | ||
1058 | !no_backup) { | ||
1059 | ret = vmw_resource_buf_alloc(res, true); | ||
1060 | if (unlikely(ret != 0)) | ||
1061 | return ret; | ||
1062 | } | ||
1063 | |||
1064 | return 0; | ||
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * vmw_resource_backoff_reservation - Unreserve and unreference a | ||
1069 | * backup buffer | ||
1070 | * | ||
1071 | * @val_buf: Backup buffer information. | ||
1072 | */ | ||
1073 | void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) | ||
1074 | { | ||
1075 | struct list_head val_list; | ||
1076 | |||
1077 | if (likely(val_buf->bo == NULL)) | ||
1078 | return; | ||
1079 | |||
1080 | INIT_LIST_HEAD(&val_list); | ||
1081 | list_add_tail(&val_buf->head, &val_list); | ||
1082 | ttm_eu_backoff_reservation(&val_list); | ||
1083 | ttm_bo_unref(&val_buf->bo); | ||
1084 | } | ||
1085 | |||
1086 | /** | ||
1087 | * vmw_resource_do_evict - Evict a resource, and transfer its data | ||
1088 | * to a backup buffer. | ||
1089 | * | ||
1090 | * @res: The resource to evict. | ||
1091 | */ | ||
1092 | int vmw_resource_do_evict(struct vmw_resource *res) | ||
1093 | { | ||
1094 | struct ttm_validate_buffer val_buf; | ||
1095 | const struct vmw_res_func *func = res->func; | ||
1096 | int ret; | ||
1097 | |||
1098 | BUG_ON(!func->may_evict); | ||
1099 | |||
1100 | val_buf.bo = NULL; | ||
1101 | ret = vmw_resource_check_buffer(res, true, &val_buf); | ||
1102 | if (unlikely(ret != 0)) | ||
1103 | return ret; | ||
1104 | |||
1105 | if (unlikely(func->unbind != NULL && | ||
1106 | (!func->needs_backup || !list_empty(&res->mob_head)))) { | ||
1107 | ret = func->unbind(res, res->res_dirty, &val_buf); | ||
1108 | if (unlikely(ret != 0)) | ||
1109 | goto out_no_unbind; | ||
1110 | list_del_init(&res->mob_head); | ||
1111 | } | ||
1112 | ret = func->destroy(res); | ||
1113 | res->backup_dirty = true; | ||
1114 | res->res_dirty = false; | ||
1115 | out_no_unbind: | ||
1116 | vmw_resource_backoff_reservation(&val_buf); | ||
1117 | |||
1118 | return ret; | ||
1119 | } | ||
1120 | |||
1121 | |||
1122 | /** | ||
1123 | * vmw_resource_validate - Make a resource up-to-date and visible | ||
1124 | * to the device. | ||
1125 | * | ||
1126 | * @res: The resource to make visible to the device. | ||
1127 | * | ||
1128 | * On successful return, any backup DMA buffer pointed to by @res->backup will | ||
1129 | * be reserved and validated. | ||
1130 | * On hardware resource shortage, this function will repeatedly evict | ||
1131 | * resources of the same type until the validation succeeds. | ||
1132 | */ | ||
1133 | int vmw_resource_validate(struct vmw_resource *res) | ||
1134 | { | ||
1135 | int ret; | ||
1136 | struct vmw_resource *evict_res; | ||
1137 | struct vmw_private *dev_priv = res->dev_priv; | ||
1138 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; | ||
1139 | struct ttm_validate_buffer val_buf; | ||
1140 | |||
1141 | if (likely(!res->func->may_evict)) | ||
1142 | return 0; | ||
1143 | |||
1144 | val_buf.bo = NULL; | ||
1145 | if (res->backup) | ||
1146 | val_buf.bo = &res->backup->base; | ||
1147 | do { | ||
1148 | ret = vmw_resource_do_validate(res, &val_buf); | ||
1149 | if (likely(ret != -EBUSY)) | ||
1150 | break; | ||
1151 | |||
1152 | write_lock(&dev_priv->resource_lock); | ||
1153 | if (list_empty(lru_list) || !res->func->may_evict) { | ||
1154 | DRM_ERROR("Out of device id entries " | ||
1155 | "for %s.\n", res->func->type_name); | ||
1156 | ret = -EBUSY; | ||
1157 | write_unlock(&dev_priv->resource_lock); | ||
1158 | break; | ||
1159 | } | ||
1160 | |||
1161 | evict_res = vmw_resource_reference | ||
1162 | (list_first_entry(lru_list, struct vmw_resource, | ||
1163 | lru_head)); | ||
1164 | list_del_init(&evict_res->lru_head); | ||
1165 | |||
1166 | write_unlock(&dev_priv->resource_lock); | ||
1167 | vmw_resource_do_evict(evict_res); | ||
1168 | vmw_resource_unreference(&evict_res); | ||
1169 | } while (1); | ||
1170 | |||
1171 | if (unlikely(ret != 0)) | ||
1172 | goto out_no_validate; | ||
1173 | else if (!res->func->needs_backup && res->backup) { | ||
1174 | list_del_init(&res->mob_head); | ||
1175 | vmw_dmabuf_unreference(&res->backup); | ||
1176 | } | ||
1177 | |||
1178 | return 0; | ||
1179 | |||
1180 | out_no_validate: | ||
1181 | return ret; | ||
1182 | } | ||
1183 | |||
1184 | /** | ||
1185 | * vmw_fence_single_bo - Utility function to fence a single TTM buffer | ||
1186 | * object without unreserving it. | ||
1187 | * | ||
1188 | * @bo: Pointer to the struct ttm_buffer_object to fence. | ||
1189 | * @fence: Pointer to the fence. If NULL, this function will | ||
1190 | * insert a fence into the command stream. | ||
1191 | * | ||
1192 | * Contrary to the ttm_eu version of this function, it takes only | ||
1193 | * a single buffer object instead of a list, and it also doesn't | ||
1194 | * unreserve the buffer object, which needs to be done separately. | ||
1195 | */ | ||
1196 | void vmw_fence_single_bo(struct ttm_buffer_object *bo, | ||
1197 | struct vmw_fence_obj *fence) | ||
1198 | { | ||
1199 | struct ttm_bo_device *bdev = bo->bdev; | ||
1200 | struct ttm_bo_driver *driver = bdev->driver; | ||
1201 | struct vmw_fence_obj *old_fence_obj; | ||
1202 | struct vmw_private *dev_priv = | ||
1203 | container_of(bdev, struct vmw_private, bdev); | ||
1204 | |||
1205 | if (fence == NULL) | ||
1206 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
1207 | else | ||
1208 | driver->sync_obj_ref(fence); | ||
1209 | |||
1210 | spin_lock(&bdev->fence_lock); | ||
1211 | |||
1212 | old_fence_obj = bo->sync_obj; | ||
1213 | bo->sync_obj = fence; | ||
1214 | |||
1215 | spin_unlock(&bdev->fence_lock); | ||
1216 | |||
1217 | if (old_fence_obj) | ||
1218 | vmw_fence_obj_unreference(&old_fence_obj); | ||
1219 | } | ||
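A short usage sketch, assuming the caller already holds the buffer reservation and either has a vmw_fence_obj for the just-submitted commands or passes NULL to have one inserted; the helper name is hypothetical:

/*
 * Sketch: fence the backup buffer of a resource and then drop the
 * reservation, which vmw_fence_single_bo does not do itself.
 */
static void example_fence_backup(struct vmw_resource *res,
                                 struct vmw_fence_obj *fence)
{
        struct ttm_buffer_object *bo = &res->backup->base;

        vmw_fence_single_bo(bo, fence); /* NULL: insert a fence. */
        ttm_bo_unreserve(bo);           /* Must be unreserved separately. */
}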
1220 | |||
1221 | /** | ||
1222 | * vmw_resource_move_notify - TTM move_notify_callback | ||
1223 | * | ||
1224 | * @bo: The TTM buffer object about to move. | ||
1225 | * @mem: The struct ttm_mem_reg indicating the memory | ||
1226 | * region to which the move is taking place. | ||
1227 | * | ||
1228 | * For now does nothing. | ||
1229 | */ | ||
1230 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, | ||
1231 | struct ttm_mem_reg *mem) | ||
1232 | { | ||
1233 | } | ||
1234 | |||
1235 | /** | ||
1236 | * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. | ||
1237 | * | ||
1238 | * @res: The resource being queried. | ||
1239 | */ | ||
1240 | bool vmw_resource_needs_backup(const struct vmw_resource *res) | ||
1241 | { | ||
1242 | return res->func->needs_backup; | ||
1243 | } | ||
1244 | |||
1245 | /** | ||
1246 | * vmw_resource_evict_type - Evict all resources of a specific type | ||
1247 | * | ||
1248 | * @dev_priv: Pointer to a device private struct | ||
1249 | * @type: The resource type to evict | ||
1250 | * | ||
1251 | * To avoid thrashing or starvation, or as part of the hibernation sequence, | ||
1252 | * evict all evictable resources of a specific type. | ||
1253 | */ | ||
1254 | static void vmw_resource_evict_type(struct vmw_private *dev_priv, | ||
1255 | enum vmw_res_type type) | ||
1256 | { | ||
1257 | struct list_head *lru_list = &dev_priv->res_lru[type]; | ||
1258 | struct vmw_resource *evict_res; | ||
1259 | |||
1260 | do { | ||
1261 | write_lock(&dev_priv->resource_lock); | ||
1262 | |||
1263 | if (list_empty(lru_list)) | ||
1264 | goto out_unlock; | ||
1265 | |||
1266 | evict_res = vmw_resource_reference( | ||
1267 | list_first_entry(lru_list, struct vmw_resource, | ||
1268 | lru_head)); | ||
1269 | list_del_init(&evict_res->lru_head); | ||
1270 | write_unlock(&dev_priv->resource_lock); | ||
1271 | vmw_resource_do_evict(evict_res); | ||
1272 | vmw_resource_unreference(&evict_res); | ||
1273 | } while (1); | ||
1274 | |||
1275 | out_unlock: | ||
1276 | write_unlock(&dev_priv->resource_lock); | ||
1277 | } | ||
1278 | |||
1279 | /** | ||
1280 | * vmw_resource_evict_all - Evict all evictable resources | ||
1281 | * | ||
1282 | * @dev_priv: Pointer to a device private struct | ||
1283 | * | ||
1284 | * To avoid thrashing or starvation, or as part of the hibernation sequence, | ||
1285 | * evict all evictable resources. In particular this means that all | ||
1286 | * guest-backed resources that are registered with the device are | ||
1287 | * evicted and the OTable becomes clean. | ||
1288 | */ | ||
1289 | void vmw_resource_evict_all(struct vmw_private *dev_priv) | ||
1290 | { | ||
1291 | enum vmw_res_type type; | ||
1292 | |||
1293 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1294 | |||
1295 | for (type = 0; type < vmw_res_max; ++type) | ||
1296 | vmw_resource_evict_type(dev_priv, type); | ||
1297 | |||
1298 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1299 | } | ||
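As a usage illustration only (the actual power-management wiring lives in other files of the driver), a hypothetical hibernation-prepare hook would simply call the function above:

/*
 * Hypothetical PM callback sketch: flush all device-visible resource
 * state to backup buffers before the hibernation image is written out.
 */
static int example_freeze(struct vmw_private *dev_priv)
{
        vmw_resource_evict_all(dev_priv);
        return 0;
}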