-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c   |    7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  144
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  898
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c    |    7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1506
6 files changed, 1783 insertions, 811 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index bd78257cba8b..655d57f188d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,7 +60,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
60 | if (unlikely(ret != 0)) | 60 | if (unlikely(ret != 0)) |
61 | return ret; | 61 | return ret; |
62 | 62 | ||
63 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | 63 | vmw_execbuf_release_pinned_bo(dev_priv); |
64 | 64 | ||
65 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | 65 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); |
66 | if (unlikely(ret != 0)) | 66 | if (unlikely(ret != 0)) |
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
105 | return ret; | 105 | return ret; |
106 | 106 | ||
107 | if (pin) | 107 | if (pin) |
108 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | 108 | vmw_execbuf_release_pinned_bo(dev_priv); |
109 | 109 | ||
110 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | 110 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); |
111 | if (unlikely(ret != 0)) | 111 | if (unlikely(ret != 0)) |
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
214 | return ret; | 214 | return ret; |
215 | 215 | ||
216 | if (pin) | 216 | if (pin) |
217 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | 217 | vmw_execbuf_release_pinned_bo(dev_priv); |
218 | |||
219 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | 218 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); |
220 | if (unlikely(ret != 0)) | 219 | if (unlikely(ret != 0)) |
221 | goto err_unlock; | 220 | goto err_unlock; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 56973cd41735..91581fd5004b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
432 | struct vmw_private *dev_priv; | 432 | struct vmw_private *dev_priv; |
433 | int ret; | 433 | int ret; |
434 | uint32_t svga_id; | 434 | uint32_t svga_id; |
435 | enum vmw_res_type i; | ||
435 | 436 | ||
436 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | 437 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
437 | if (unlikely(dev_priv == NULL)) { | 438 | if (unlikely(dev_priv == NULL)) { |
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
448 | mutex_init(&dev_priv->cmdbuf_mutex); | 449 | mutex_init(&dev_priv->cmdbuf_mutex); |
449 | mutex_init(&dev_priv->release_mutex); | 450 | mutex_init(&dev_priv->release_mutex); |
450 | rwlock_init(&dev_priv->resource_lock); | 451 | rwlock_init(&dev_priv->resource_lock); |
451 | idr_init(&dev_priv->context_idr); | 452 | |
452 | idr_init(&dev_priv->surface_idr); | 453 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
453 | idr_init(&dev_priv->stream_idr); | 454 | idr_init(&dev_priv->res_idr[i]); |
455 | INIT_LIST_HEAD(&dev_priv->res_lru[i]); | ||
456 | } | ||
457 | |||
454 | mutex_init(&dev_priv->init_mutex); | 458 | mutex_init(&dev_priv->init_mutex); |
455 | init_waitqueue_head(&dev_priv->fence_queue); | 459 | init_waitqueue_head(&dev_priv->fence_queue); |
456 | init_waitqueue_head(&dev_priv->fifo_queue); | 460 | init_waitqueue_head(&dev_priv->fifo_queue); |
457 | dev_priv->fence_queue_waiters = 0; | 461 | dev_priv->fence_queue_waiters = 0; |
458 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | 462 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
459 | INIT_LIST_HEAD(&dev_priv->surface_lru); | 463 | |
460 | dev_priv->used_memory_size = 0; | 464 | dev_priv->used_memory_size = 0; |
461 | 465 | ||
462 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | 466 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); |
@@ -670,9 +674,9 @@ out_err2:
670 | out_err1: | 674 | out_err1: |
671 | vmw_ttm_global_release(dev_priv); | 675 | vmw_ttm_global_release(dev_priv); |
672 | out_err0: | 676 | out_err0: |
673 | idr_destroy(&dev_priv->surface_idr); | 677 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
674 | idr_destroy(&dev_priv->context_idr); | 678 | idr_destroy(&dev_priv->res_idr[i]); |
675 | idr_destroy(&dev_priv->stream_idr); | 679 | |
676 | kfree(dev_priv); | 680 | kfree(dev_priv); |
677 | return ret; | 681 | return ret; |
678 | } | 682 | } |
@@ -680,9 +684,12 @@ out_err0:
680 | static int vmw_driver_unload(struct drm_device *dev) | 684 | static int vmw_driver_unload(struct drm_device *dev) |
681 | { | 685 | { |
682 | struct vmw_private *dev_priv = vmw_priv(dev); | 686 | struct vmw_private *dev_priv = vmw_priv(dev); |
687 | enum vmw_res_type i; | ||
683 | 688 | ||
684 | unregister_pm_notifier(&dev_priv->pm_nb); | 689 | unregister_pm_notifier(&dev_priv->pm_nb); |
685 | 690 | ||
691 | if (dev_priv->ctx.res_ht_initialized) | ||
692 | drm_ht_remove(&dev_priv->ctx.res_ht); | ||
686 | if (dev_priv->ctx.cmd_bounce) | 693 | if (dev_priv->ctx.cmd_bounce) |
687 | vfree(dev_priv->ctx.cmd_bounce); | 694 | vfree(dev_priv->ctx.cmd_bounce); |
688 | if (dev_priv->enable_fb) { | 695 | if (dev_priv->enable_fb) { |
@@ -709,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
709 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 716 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
710 | (void)ttm_bo_device_release(&dev_priv->bdev); | 717 | (void)ttm_bo_device_release(&dev_priv->bdev); |
711 | vmw_ttm_global_release(dev_priv); | 718 | vmw_ttm_global_release(dev_priv); |
712 | idr_destroy(&dev_priv->surface_idr); | 719 | |
713 | idr_destroy(&dev_priv->context_idr); | 720 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
714 | idr_destroy(&dev_priv->stream_idr); | 721 | idr_destroy(&dev_priv->res_idr[i]); |
715 | 722 | ||
716 | kfree(dev_priv); | 723 | kfree(dev_priv); |
717 | 724 | ||
@@ -935,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
935 | 942 | ||
936 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 943 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
937 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 944 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
938 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | 945 | vmw_execbuf_release_pinned_bo(dev_priv); |
939 | 946 | ||
940 | if (unlikely((ret != 0))) { | 947 | if (unlikely((ret != 0))) { |
941 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 948 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
@@ -987,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
987 | * This empties VRAM and unbinds all GMR bindings. | 994 | * This empties VRAM and unbinds all GMR bindings. |
988 | * Buffer contents is moved to swappable memory. | 995 | * Buffer contents is moved to swappable memory. |
989 | */ | 996 | */ |
990 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | 997 | vmw_execbuf_release_pinned_bo(dev_priv); |
998 | vmw_resource_evict_all(dev_priv); | ||
991 | ttm_bo_swapout_all(&dev_priv->bdev); | 999 | ttm_bo_swapout_all(&dev_priv->bdev); |
992 | 1000 | ||
993 | break; | 1001 | break; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7c6f6e3a3c81..34dce9e2b6d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
67 | 67 | ||
68 | struct vmw_dma_buffer { | 68 | struct vmw_dma_buffer { |
69 | struct ttm_buffer_object base; | 69 | struct ttm_buffer_object base; |
70 | struct list_head validate_list; | 70 | struct list_head res_list; |
71 | bool gmr_bound; | ||
72 | uint32_t cur_validate_node; | ||
73 | bool on_validate_list; | ||
74 | }; | 71 | }; |
75 | 72 | ||
73 | /** | ||
74 | * struct vmw_validate_buffer - Carries validation info about buffers. | ||
75 | * | ||
76 | * @base: Validation info for TTM. | ||
77 | * @hash: Hash entry for quick lookup of the TTM buffer object. | ||
78 | * | ||
79 | * This structure contains also driver private validation info | ||
80 | * on top of the info needed by TTM. | ||
81 | */ | ||
82 | struct vmw_validate_buffer { | ||
83 | struct ttm_validate_buffer base; | ||
84 | struct drm_hash_item hash; | ||
85 | }; | ||
86 | |||
87 | struct vmw_res_func; | ||
76 | struct vmw_resource { | 88 | struct vmw_resource { |
77 | struct kref kref; | 89 | struct kref kref; |
78 | struct vmw_private *dev_priv; | 90 | struct vmw_private *dev_priv; |
79 | struct idr *idr; | ||
80 | int id; | 91 | int id; |
81 | enum ttm_object_type res_type; | ||
82 | bool avail; | 92 | bool avail; |
83 | void (*remove_from_lists) (struct vmw_resource *res); | 93 | unsigned long backup_size; |
84 | void (*hw_destroy) (struct vmw_resource *res); | 94 | bool res_dirty; /* Protected by backup buffer reserved */ |
95 | bool backup_dirty; /* Protected by backup buffer reserved */ | ||
96 | struct vmw_dma_buffer *backup; | ||
97 | unsigned long backup_offset; | ||
98 | const struct vmw_res_func *func; | ||
99 | struct list_head lru_head; /* Protected by the resource lock */ | ||
100 | struct list_head mob_head; /* Protected by @backup reserved */ | ||
85 | void (*res_free) (struct vmw_resource *res); | 101 | void (*res_free) (struct vmw_resource *res); |
86 | struct list_head validate_head; | 102 | void (*hw_destroy) (struct vmw_resource *res); |
87 | struct list_head query_head; /* Protected by the cmdbuf mutex */ | 103 | }; |
88 | /* TODO is a generic snooper needed? */ | 104 | |
89 | #if 0 | 105 | enum vmw_res_type { |
90 | void (*snoop)(struct vmw_resource *res, | 106 | vmw_res_context, |
91 | struct ttm_object_file *tfile, | 107 | vmw_res_surface, |
92 | SVGA3dCmdHeader *header); | 108 | vmw_res_stream, |
93 | void *snoop_priv; | 109 | vmw_res_max |
94 | #endif | ||
95 | }; | 110 | }; |
96 | 111 | ||
97 | struct vmw_cursor_snooper { | 112 | struct vmw_cursor_snooper { |
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
105 | 120 | ||
106 | struct vmw_surface { | 121 | struct vmw_surface { |
107 | struct vmw_resource res; | 122 | struct vmw_resource res; |
108 | struct list_head lru_head; /* Protected by the resource lock */ | ||
109 | uint32_t flags; | 123 | uint32_t flags; |
110 | uint32_t format; | 124 | uint32_t format; |
111 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; | 125 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; |
126 | struct drm_vmw_size base_size; | ||
112 | struct drm_vmw_size *sizes; | 127 | struct drm_vmw_size *sizes; |
113 | uint32_t num_sizes; | 128 | uint32_t num_sizes; |
114 | |||
115 | bool scanout; | 129 | bool scanout; |
116 | |||
117 | /* TODO so far just a extra pointer */ | 130 | /* TODO so far just a extra pointer */ |
118 | struct vmw_cursor_snooper snooper; | 131 | struct vmw_cursor_snooper snooper; |
119 | struct ttm_buffer_object *backup; | ||
120 | struct vmw_surface_offset *offsets; | 132 | struct vmw_surface_offset *offsets; |
121 | uint32_t backup_size; | 133 | SVGA3dTextureFilter autogen_filter; |
134 | uint32_t multisample_count; | ||
122 | }; | 135 | }; |
123 | 136 | ||
124 | struct vmw_marker_queue { | 137 | struct vmw_marker_queue { |
@@ -145,29 +158,46 @@ struct vmw_relocation {
145 | uint32_t index; | 158 | uint32_t index; |
146 | }; | 159 | }; |
147 | 160 | ||
161 | /** | ||
162 | * struct vmw_res_cache_entry - resource information cache entry | ||
163 | * | ||
164 | * @valid: Whether the entry is valid, which also implies that the execbuf | ||
165 | * code holds a reference to the resource, and it's placed on the | ||
166 | * validation list. | ||
167 | * @handle: User-space handle of a resource. | ||
168 | * @res: Non-ref-counted pointer to the resource. | ||
169 | * | ||
170 | * Used to avoid frequent repeated user-space handle lookups of the | ||
171 | * same resource. | ||
172 | */ | ||
173 | struct vmw_res_cache_entry { | ||
174 | bool valid; | ||
175 | uint32_t handle; | ||
176 | struct vmw_resource *res; | ||
177 | struct vmw_resource_val_node *node; | ||
178 | }; | ||
179 | |||
148 | struct vmw_sw_context{ | 180 | struct vmw_sw_context{ |
149 | struct ida bo_list; | 181 | struct drm_open_hash res_ht; |
150 | uint32_t last_cid; | 182 | bool res_ht_initialized; |
151 | bool cid_valid; | ||
152 | bool kernel; /**< is the called made from the kernel */ | 183 | bool kernel; /**< is the called made from the kernel */ |
153 | struct vmw_resource *cur_ctx; | ||
154 | uint32_t last_sid; | ||
155 | uint32_t sid_translation; | ||
156 | bool sid_valid; | ||
157 | struct ttm_object_file *tfile; | 184 | struct ttm_object_file *tfile; |
158 | struct list_head validate_nodes; | 185 | struct list_head validate_nodes; |
159 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; | 186 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; |
160 | uint32_t cur_reloc; | 187 | uint32_t cur_reloc; |
161 | struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; | 188 | struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; |
162 | uint32_t cur_val_buf; | 189 | uint32_t cur_val_buf; |
163 | uint32_t *cmd_bounce; | 190 | uint32_t *cmd_bounce; |
164 | uint32_t cmd_bounce_size; | 191 | uint32_t cmd_bounce_size; |
165 | struct list_head resource_list; | 192 | struct list_head resource_list; |
166 | uint32_t fence_flags; | 193 | uint32_t fence_flags; |
167 | struct list_head query_list; | ||
168 | struct ttm_buffer_object *cur_query_bo; | 194 | struct ttm_buffer_object *cur_query_bo; |
169 | uint32_t cur_query_cid; | 195 | struct list_head res_relocations; |
170 | bool query_cid_valid; | 196 | uint32_t *buf_start; |
197 | struct vmw_res_cache_entry res_cache[vmw_res_max]; | ||
198 | struct vmw_resource *last_query_ctx; | ||
199 | bool needs_post_query_barrier; | ||
200 | struct vmw_resource *error_resource; | ||
171 | }; | 201 | }; |
172 | 202 | ||
173 | struct vmw_legacy_display; | 203 | struct vmw_legacy_display; |
@@ -242,10 +272,7 @@ struct vmw_private {
242 | */ | 272 | */ |
243 | 273 | ||
244 | rwlock_t resource_lock; | 274 | rwlock_t resource_lock; |
245 | struct idr context_idr; | 275 | struct idr res_idr[vmw_res_max]; |
246 | struct idr surface_idr; | ||
247 | struct idr stream_idr; | ||
248 | |||
249 | /* | 276 | /* |
250 | * Block lastclose from racing with firstopen. | 277 | * Block lastclose from racing with firstopen. |
251 | */ | 278 | */ |
@@ -320,6 +347,7 @@ struct vmw_private {
320 | struct ttm_buffer_object *dummy_query_bo; | 347 | struct ttm_buffer_object *dummy_query_bo; |
321 | struct ttm_buffer_object *pinned_bo; | 348 | struct ttm_buffer_object *pinned_bo; |
322 | uint32_t query_cid; | 349 | uint32_t query_cid; |
350 | uint32_t query_cid_valid; | ||
323 | bool dummy_query_bo_pinned; | 351 | bool dummy_query_bo_pinned; |
324 | 352 | ||
325 | /* | 353 | /* |
@@ -329,10 +357,15 @@ struct vmw_private {
329 | * protected by the cmdbuf mutex for simplicity. | 357 | * protected by the cmdbuf mutex for simplicity. |
330 | */ | 358 | */ |
331 | 359 | ||
332 | struct list_head surface_lru; | 360 | struct list_head res_lru[vmw_res_max]; |
333 | uint32_t used_memory_size; | 361 | uint32_t used_memory_size; |
334 | }; | 362 | }; |
335 | 363 | ||
364 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | ||
365 | { | ||
366 | return container_of(res, struct vmw_surface, res); | ||
367 | } | ||
368 | |||
336 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 369 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
337 | { | 370 | { |
338 | return (struct vmw_private *)dev->dev_private; | 371 | return (struct vmw_private *)dev->dev_private; |
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
381 | /** | 414 | /** |
382 | * Resource utilities - vmwgfx_resource.c | 415 | * Resource utilities - vmwgfx_resource.c |
383 | */ | 416 | */ |
417 | struct vmw_user_resource_conv; | ||
418 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
419 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
384 | 420 | ||
385 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | 421 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); |
386 | extern void vmw_resource_unreference(struct vmw_resource **p_res); | 422 | extern void vmw_resource_unreference(struct vmw_resource **p_res); |
387 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 423 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
424 | extern int vmw_resource_validate(struct vmw_resource *res); | ||
425 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); | ||
426 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | ||
388 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | 427 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
389 | struct drm_file *file_priv); | 428 | struct drm_file *file_priv); |
390 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | 429 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
398 | uint32_t handle, | 437 | uint32_t handle, |
399 | struct vmw_surface **out_surf, | 438 | struct vmw_surface **out_surf, |
400 | struct vmw_dma_buffer **out_buf); | 439 | struct vmw_dma_buffer **out_buf); |
440 | extern int vmw_user_resource_lookup_handle( | ||
441 | struct vmw_private *dev_priv, | ||
442 | struct ttm_object_file *tfile, | ||
443 | uint32_t handle, | ||
444 | const struct vmw_user_resource_conv *converter, | ||
445 | struct vmw_resource **p_res); | ||
401 | extern void vmw_surface_res_free(struct vmw_resource *res); | 446 | extern void vmw_surface_res_free(struct vmw_resource *res); |
402 | extern int vmw_surface_init(struct vmw_private *dev_priv, | ||
403 | struct vmw_surface *srf, | ||
404 | void (*res_free) (struct vmw_resource *res)); | ||
405 | extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | ||
406 | struct ttm_object_file *tfile, | ||
407 | uint32_t handle, | ||
408 | struct vmw_surface **out); | ||
409 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | 447 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
410 | struct drm_file *file_priv); | 448 | struct drm_file *file_priv); |
411 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | 449 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
@@ -440,7 +478,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
440 | struct ttm_object_file *tfile, | 478 | struct ttm_object_file *tfile, |
441 | uint32_t *inout_id, | 479 | uint32_t *inout_id, |
442 | struct vmw_resource **out); | 480 | struct vmw_resource **out); |
443 | extern void vmw_resource_unreserve(struct list_head *list); | 481 | extern void vmw_resource_unreserve(struct vmw_resource *res, |
482 | struct vmw_dma_buffer *new_backup, | ||
483 | unsigned long new_backup_offset); | ||
484 | extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, | ||
485 | struct ttm_mem_reg *mem); | ||
486 | extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, | ||
487 | struct vmw_fence_obj *fence); | ||
488 | extern void vmw_resource_evict_all(struct vmw_private *dev_priv); | ||
444 | 489 | ||
445 | /** | 490 | /** |
446 | * DMA buffer helper routines - vmwgfx_dmabuf.c | 491 | * DMA buffer helper routines - vmwgfx_dmabuf.c |
@@ -538,10 +583,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
538 | struct drm_vmw_fence_rep __user | 583 | struct drm_vmw_fence_rep __user |
539 | *user_fence_rep, | 584 | *user_fence_rep, |
540 | struct vmw_fence_obj **out_fence); | 585 | struct vmw_fence_obj **out_fence); |
541 | 586 | extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |
542 | extern void | 587 | struct vmw_fence_obj *fence); |
543 | vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 588 | extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); |
544 | bool only_on_cid_match, uint32_t cid); | ||
545 | 589 | ||
546 | extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, | 590 | extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
547 | struct vmw_private *dev_priv, | 591 | struct vmw_private *dev_priv, |
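The new struct vmw_res_cache_entry in the header above caches the last user-space handle looked up for each resource type, so repeated commands that reference the same resource hit a fast path instead of a full handle lookup. A minimal, self-contained C sketch of that idea follows; resource, res_cache_entry, slow_lookup and lookup_cached are hypothetical names used only for illustration.

/*
 * Illustrative sketch of a per-type handle lookup cache: check the
 * cached handle first, fall back to the expensive lookup otherwise.
 */
#include <stdint.h>
#include <stdio.h>

struct resource {
	int id;
};

struct res_cache_entry {
	int valid;
	uint32_t handle;
	struct resource *res;
};

static struct resource pool[4];

/* Stand-in for the expensive user-space handle -> resource lookup. */
static struct resource *slow_lookup(uint32_t handle)
{
	printf("slow lookup for handle %u\n", (unsigned)handle);
	pool[handle % 4].id = (int)handle;
	return &pool[handle % 4];
}

static struct resource *lookup_cached(struct res_cache_entry *cache,
				      uint32_t handle)
{
	/* Fast path: same handle as the previous command in this batch. */
	if (cache->valid && cache->handle == handle)
		return cache->res;

	cache->res = slow_lookup(handle);
	cache->handle = handle;
	cache->valid = 1;
	return cache->res;
}

int main(void)
{
	struct res_cache_entry cache = {0};

	lookup_cached(&cache, 7);   /* slow path */
	lookup_cached(&cache, 7);   /* served from the cache */
	return 0;
}

The cache only has to stay coherent for one command submission, which is why the real driver can get away with a single entry per resource type.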
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e5775a0db495..534c96703c3f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
30 | #include <drm/ttm/ttm_bo_api.h> | 30 | #include <drm/ttm/ttm_bo_api.h> |
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | 32 | ||
33 | #define VMW_RES_HT_ORDER 12 | ||
34 | |||
35 | /** | ||
36 | * struct vmw_resource_relocation - Relocation info for resources | ||
37 | * | ||
38 | * @head: List head for the software context's relocation list. | ||
39 | * @res: Non-ref-counted pointer to the resource. | ||
40 | * @offset: Offset of 4 byte entries into the command buffer where the | ||
41 | * id that needs fixup is located. | ||
42 | */ | ||
43 | struct vmw_resource_relocation { | ||
44 | struct list_head head; | ||
45 | const struct vmw_resource *res; | ||
46 | unsigned long offset; | ||
47 | }; | ||
48 | |||
49 | /** | ||
50 | * struct vmw_resource_val_node - Validation info for resources | ||
51 | * | ||
52 | * @head: List head for the software context's resource list. | ||
53 | * @hash: Hash entry for quick resource to val_node lookup. | ||
54 | * @res: Ref-counted pointer to the resource. | ||
55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. | ||
56 | * @new_backup: Refcounted pointer to the new backup buffer. | ||
57 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | ||
58 | * @first_usage: Set to true the first time the resource is referenced in | ||
59 | * the command stream. | ||
60 | * @no_buffer_needed: Resources do not need to allocate buffer backup on | ||
61 | * reservation. The command stream will provide one. | ||
62 | */ | ||
63 | struct vmw_resource_val_node { | ||
64 | struct list_head head; | ||
65 | struct drm_hash_item hash; | ||
66 | struct vmw_resource *res; | ||
67 | struct vmw_dma_buffer *new_backup; | ||
68 | unsigned long new_backup_offset; | ||
69 | bool first_usage; | ||
70 | bool no_buffer_needed; | ||
71 | }; | ||
72 | |||
73 | /** | ||
74 | * vmw_resource_list_unreserve - unreserve resources previously reserved for | ||
75 | * command submission. | ||
76 | * | ||
77 | * @list_head: list of resources to unreserve. | ||
78 | * @backoff: Whether command submission failed. | ||
79 | */ | ||
80 | static void vmw_resource_list_unreserve(struct list_head *list, | ||
81 | bool backoff) | ||
82 | { | ||
83 | struct vmw_resource_val_node *val; | ||
84 | |||
85 | list_for_each_entry(val, list, head) { | ||
86 | struct vmw_resource *res = val->res; | ||
87 | struct vmw_dma_buffer *new_backup = | ||
88 | backoff ? NULL : val->new_backup; | ||
89 | |||
90 | vmw_resource_unreserve(res, new_backup, | ||
91 | val->new_backup_offset); | ||
92 | vmw_dmabuf_unreference(&val->new_backup); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | |||
97 | /** | ||
98 | * vmw_resource_val_add - Add a resource to the software context's | ||
99 | * resource list if it's not already on it. | ||
100 | * | ||
101 | * @sw_context: Pointer to the software context. | ||
102 | * @res: Pointer to the resource. | ||
103 | * @p_node: On successful return points to a valid pointer to a | ||
104 | * struct vmw_resource_val_node, if non-NULL on entry. | ||
105 | */ | ||
106 | static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | ||
107 | struct vmw_resource *res, | ||
108 | struct vmw_resource_val_node **p_node) | ||
109 | { | ||
110 | struct vmw_resource_val_node *node; | ||
111 | struct drm_hash_item *hash; | ||
112 | int ret; | ||
113 | |||
114 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, | ||
115 | &hash) == 0)) { | ||
116 | node = container_of(hash, struct vmw_resource_val_node, hash); | ||
117 | node->first_usage = false; | ||
118 | if (unlikely(p_node != NULL)) | ||
119 | *p_node = node; | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
124 | if (unlikely(node == NULL)) { | ||
125 | DRM_ERROR("Failed to allocate a resource validation " | ||
126 | "entry.\n"); | ||
127 | return -ENOMEM; | ||
128 | } | ||
129 | |||
130 | node->hash.key = (unsigned long) res; | ||
131 | ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); | ||
132 | if (unlikely(ret != 0)) { | ||
133 | DRM_ERROR("Failed to initialize a resource validation " | ||
134 | "entry.\n"); | ||
135 | kfree(node); | ||
136 | return ret; | ||
137 | } | ||
138 | list_add_tail(&node->head, &sw_context->resource_list); | ||
139 | node->res = vmw_resource_reference(res); | ||
140 | node->first_usage = true; | ||
141 | |||
142 | if (unlikely(p_node != NULL)) | ||
143 | *p_node = node; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * vmw_resource_relocation_add - Add a relocation to the relocation list | ||
150 | * | ||
151 | * @list: Pointer to head of relocation list. | ||
152 | * @res: The resource. | ||
153 | * @offset: Offset into the command buffer currently being parsed where the | ||
154 | * id that needs fixup is located. Granularity is 4 bytes. | ||
155 | */ | ||
156 | static int vmw_resource_relocation_add(struct list_head *list, | ||
157 | const struct vmw_resource *res, | ||
158 | unsigned long offset) | ||
159 | { | ||
160 | struct vmw_resource_relocation *rel; | ||
161 | |||
162 | rel = kmalloc(sizeof(*rel), GFP_KERNEL); | ||
163 | if (unlikely(rel == NULL)) { | ||
164 | DRM_ERROR("Failed to allocate a resource relocation.\n"); | ||
165 | return -ENOMEM; | ||
166 | } | ||
167 | |||
168 | rel->res = res; | ||
169 | rel->offset = offset; | ||
170 | list_add_tail(&rel->head, list); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * vmw_resource_relocations_free - Free all relocations on a list | ||
177 | * | ||
178 | * @list: Pointer to the head of the relocation list. | ||
179 | */ | ||
180 | static void vmw_resource_relocations_free(struct list_head *list) | ||
181 | { | ||
182 | struct vmw_resource_relocation *rel, *n; | ||
183 | |||
184 | list_for_each_entry_safe(rel, n, list, head) { | ||
185 | list_del(&rel->head); | ||
186 | kfree(rel); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * vmw_resource_relocations_apply - Apply all relocations on a list | ||
192 | * | ||
193 | * @cb: Pointer to the start of the command buffer being patched. This need | ||
194 | * not be the same buffer as the one being parsed when the relocation | ||
195 | * list was built, but the contents must be the same modulo the | ||
196 | * resource ids. | ||
197 | * @list: Pointer to the head of the relocation list. | ||
198 | */ | ||
199 | static void vmw_resource_relocations_apply(uint32_t *cb, | ||
200 | struct list_head *list) | ||
201 | { | ||
202 | struct vmw_resource_relocation *rel; | ||
203 | |||
204 | list_for_each_entry(rel, list, head) | ||
205 | cb[rel->offset] = rel->res->id; | ||
206 | } | ||
207 | |||
33 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 208 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
34 | struct vmw_sw_context *sw_context, | 209 | struct vmw_sw_context *sw_context, |
35 | SVGA3dCmdHeader *header) | 210 | SVGA3dCmdHeader *header) |
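The helpers added in this hunk record, for each resource id embedded in the command stream, the 32-bit word offset at which it sits, and later patch the real device id in with cb[rel->offset] = rel->res->id. The following self-contained C sketch shows that record-then-patch scheme with plain arrays standing in for the driver's list_head-based relocation list; all names in it are hypothetical.

/*
 * Illustrative sketch: collect (resource, offset) pairs while parsing,
 * then fix up the command buffer in one pass once ids are final.
 */
#include <stdint.h>
#include <stdio.h>

struct resource {
	uint32_t id;            /* device-visible id, known late */
};

struct resource_reloc {
	const struct resource *res;
	unsigned long offset;   /* offset in 32-bit words into the buffer */
};

static void relocations_apply(uint32_t *cb,
			      const struct resource_reloc *relocs,
			      unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; ++i)
		cb[relocs[i].offset] = relocs[i].res->id;
}

int main(void)
{
	uint32_t cmd_buf[4] = { 0xdead, 0xffff, 0xbeef, 0xffff };
	struct resource ctx = { .id = 3 }, surf = { .id = 42 };
	struct resource_reloc relocs[] = {
		{ .res = &ctx,  .offset = 1 },
		{ .res = &surf, .offset = 3 },
	};

	relocations_apply(cmd_buf, relocs, 2);
	printf("patched ids: %u %u\n",
	       (unsigned)cmd_buf[1], (unsigned)cmd_buf[3]);
	return 0;
}

Deferring the patching is what lets the command buffer be copied or bounced before submission, as the relocation list refers to offsets rather than to addresses inside one particular buffer.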
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
44 | return 0; | 219 | return 0; |
45 | } | 220 | } |
46 | 221 | ||
47 | static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, | ||
48 | struct vmw_resource **p_res) | ||
49 | { | ||
50 | struct vmw_resource *res = *p_res; | ||
51 | |||
52 | if (list_empty(&res->validate_head)) { | ||
53 | list_add_tail(&res->validate_head, &sw_context->resource_list); | ||
54 | *p_res = NULL; | ||
55 | } else | ||
56 | vmw_resource_unreference(p_res); | ||
57 | } | ||
58 | |||
59 | /** | 222 | /** |
60 | * vmw_bo_to_validate_list - add a bo to a validate list | 223 | * vmw_bo_to_validate_list - add a bo to a validate list |
61 | * | 224 | * |
62 | * @sw_context: The software context used for this command submission batch. | 225 | * @sw_context: The software context used for this command submission batch. |
63 | * @bo: The buffer object to add. | 226 | * @bo: The buffer object to add. |
64 | * @fence_flags: Fence flags to be or'ed with any other fence flags for | ||
65 | * this buffer on this submission batch. | ||
66 | * @p_val_node: If non-NULL Will be updated with the validate node number | 227 | * @p_val_node: If non-NULL Will be updated with the validate node number |
67 | * on return. | 228 | * on return. |
68 | * | 229 | * |
@@ -74,21 +235,37 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
74 | uint32_t *p_val_node) | 235 | uint32_t *p_val_node) |
75 | { | 236 | { |
76 | uint32_t val_node; | 237 | uint32_t val_node; |
238 | struct vmw_validate_buffer *vval_buf; | ||
77 | struct ttm_validate_buffer *val_buf; | 239 | struct ttm_validate_buffer *val_buf; |
240 | struct drm_hash_item *hash; | ||
241 | int ret; | ||
78 | 242 | ||
79 | val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 243 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, |
80 | 244 | &hash) == 0)) { | |
81 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { | 245 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
82 | DRM_ERROR("Max number of DMA buffers per submission" | 246 | hash); |
83 | " exceeded.\n"); | 247 | val_buf = &vval_buf->base; |
84 | return -EINVAL; | 248 | val_node = vval_buf - sw_context->val_bufs; |
85 | } | 249 | } else { |
86 | 250 | val_node = sw_context->cur_val_buf; | |
87 | val_buf = &sw_context->val_bufs[val_node]; | 251 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { |
88 | if (unlikely(val_node == sw_context->cur_val_buf)) { | 252 | DRM_ERROR("Max number of DMA buffers per submission " |
253 | "exceeded.\n"); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | vval_buf = &sw_context->val_bufs[val_node]; | ||
257 | vval_buf->hash.key = (unsigned long) bo; | ||
258 | ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); | ||
259 | if (unlikely(ret != 0)) { | ||
260 | DRM_ERROR("Failed to initialize a buffer validation " | ||
261 | "entry.\n"); | ||
262 | return ret; | ||
263 | } | ||
264 | ++sw_context->cur_val_buf; | ||
265 | val_buf = &vval_buf->base; | ||
89 | val_buf->bo = ttm_bo_reference(bo); | 266 | val_buf->bo = ttm_bo_reference(bo); |
267 | val_buf->reserved = false; | ||
90 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 268 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
91 | ++sw_context->cur_val_buf; | ||
92 | } | 269 | } |
93 | 270 | ||
94 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 271 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
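vmw_bo_to_validate_list() above now keys each buffer object by its pointer in a hash table, so the same buffer only ever gets one slot on the validation list and the slot index doubles as the validate node number. Below is a tiny self-contained C sketch of that deduplication idea; it uses a linear scan instead of drm_open_hash purely to stay compact, and every name in it is hypothetical.

/*
 * Illustrative sketch: add a buffer to the validation list at most once,
 * returning the existing slot index on repeated additions.
 */
#include <stdio.h>

#define MAX_VALIDATIONS 8

struct buffer {
	int id;
};

struct validate_entry {
	struct buffer *bo;
};

static struct validate_entry val_bufs[MAX_VALIDATIONS];
static unsigned int cur_val_buf;

static int buffer_to_validate_list(struct buffer *bo)
{
	unsigned int i;

	/* Already on the list? Reuse the existing slot. */
	for (i = 0; i < cur_val_buf; ++i)
		if (val_bufs[i].bo == bo)
			return (int)i;

	if (cur_val_buf >= MAX_VALIDATIONS)
		return -1;      /* too many distinct buffers */

	val_bufs[cur_val_buf].bo = bo;
	return (int)cur_val_buf++;
}

int main(void)
{
	struct buffer a = { .id = 1 }, b = { .id = 2 };

	printf("%d %d %d\n",
	       buffer_to_validate_list(&a),   /* 0 */
	       buffer_to_validate_list(&b),   /* 1 */
	       buffer_to_validate_list(&a));  /* 0 again, not duplicated */
	return 0;
}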
@@ -99,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
99 | return 0; | 276 | return 0; |
100 | } | 277 | } |
101 | 278 | ||
102 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | 279 | /** |
103 | struct vmw_sw_context *sw_context, | 280 | * vmw_resources_reserve - Reserve all resources on the sw_context's |
104 | SVGA3dCmdHeader *header) | 281 | * resource list. |
282 | * | ||
283 | * @sw_context: Pointer to the software context. | ||
284 | * | ||
285 | * Note that since vmware's command submission currently is protected by | ||
286 | * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, | ||
287 | * since only a single thread at once will attempt this. | ||
288 | */ | ||
289 | static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | ||
105 | { | 290 | { |
106 | struct vmw_resource *ctx; | 291 | struct vmw_resource_val_node *val; |
107 | |||
108 | struct vmw_cid_cmd { | ||
109 | SVGA3dCmdHeader header; | ||
110 | __le32 cid; | ||
111 | } *cmd; | ||
112 | int ret; | 292 | int ret; |
113 | 293 | ||
114 | cmd = container_of(header, struct vmw_cid_cmd, header); | 294 | list_for_each_entry(val, &sw_context->resource_list, head) { |
115 | if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) | 295 | struct vmw_resource *res = val->res; |
116 | return 0; | ||
117 | 296 | ||
118 | ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, | 297 | ret = vmw_resource_reserve(res, val->no_buffer_needed); |
119 | &ctx); | 298 | if (unlikely(ret != 0)) |
120 | if (unlikely(ret != 0)) { | 299 | return ret; |
121 | DRM_ERROR("Could not find or use context %u\n", | 300 | |
122 | (unsigned) cmd->cid); | 301 | if (res->backup) { |
123 | return ret; | 302 | struct ttm_buffer_object *bo = &res->backup->base; |
303 | |||
304 | ret = vmw_bo_to_validate_list | ||
305 | (sw_context, bo, NULL); | ||
306 | |||
307 | if (unlikely(ret != 0)) | ||
308 | return ret; | ||
309 | } | ||
124 | } | 310 | } |
311 | return 0; | ||
312 | } | ||
125 | 313 | ||
126 | sw_context->last_cid = cmd->cid; | 314 | /** |
127 | sw_context->cid_valid = true; | 315 | * vmw_resources_validate - Validate all resources on the sw_context's |
128 | sw_context->cur_ctx = ctx; | 316 | * resource list. |
129 | vmw_resource_to_validate_list(sw_context, &ctx); | 317 | * |
318 | * @sw_context: Pointer to the software context. | ||
319 | * | ||
320 | * Before this function is called, all resource backup buffers must have | ||
321 | * been validated. | ||
322 | */ | ||
323 | static int vmw_resources_validate(struct vmw_sw_context *sw_context) | ||
324 | { | ||
325 | struct vmw_resource_val_node *val; | ||
326 | int ret; | ||
327 | |||
328 | list_for_each_entry(val, &sw_context->resource_list, head) { | ||
329 | struct vmw_resource *res = val->res; | ||
130 | 330 | ||
331 | ret = vmw_resource_validate(res); | ||
332 | if (unlikely(ret != 0)) { | ||
333 | if (ret != -ERESTARTSYS) | ||
334 | DRM_ERROR("Failed to validate resource.\n"); | ||
335 | return ret; | ||
336 | } | ||
337 | } | ||
131 | return 0; | 338 | return 0; |
132 | } | 339 | } |
133 | 340 | ||
134 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | 341 | /** |
342 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | ||
343 | * on the resource validate list unless it's already there. | ||
344 | * | ||
345 | * @dev_priv: Pointer to a device private structure. | ||
346 | * @sw_context: Pointer to the software context. | ||
347 | * @res_type: Resource type. | ||
348 | * @converter: User-space visible type specific information. | ||
349 | * @id: Pointer to the location in the command buffer currently being | ||
350 | * parsed from where the user-space resource id handle is located. | ||
351 | */ | ||
352 | static int vmw_cmd_res_check(struct vmw_private *dev_priv, | ||
135 | struct vmw_sw_context *sw_context, | 353 | struct vmw_sw_context *sw_context, |
136 | uint32_t *sid) | 354 | enum vmw_res_type res_type, |
355 | const struct vmw_user_resource_conv *converter, | ||
356 | uint32_t *id, | ||
357 | struct vmw_resource_val_node **p_val) | ||
137 | { | 358 | { |
138 | struct vmw_surface *srf; | 359 | struct vmw_res_cache_entry *rcache = |
139 | int ret; | 360 | &sw_context->res_cache[res_type]; |
140 | struct vmw_resource *res; | 361 | struct vmw_resource *res; |
362 | struct vmw_resource_val_node *node; | ||
363 | int ret; | ||
141 | 364 | ||
142 | if (*sid == SVGA3D_INVALID_ID) | 365 | if (*id == SVGA3D_INVALID_ID) |
143 | return 0; | 366 | return 0; |
144 | 367 | ||
145 | if (likely((sw_context->sid_valid && | 368 | /* |
146 | *sid == sw_context->last_sid))) { | 369 | * Fastpath in case of repeated commands referencing the same |
147 | *sid = sw_context->sid_translation; | 370 | * resource |
148 | return 0; | 371 | */ |
149 | } | ||
150 | 372 | ||
151 | ret = vmw_user_surface_lookup_handle(dev_priv, | 373 | if (likely(rcache->valid && *id == rcache->handle)) { |
152 | sw_context->tfile, | 374 | const struct vmw_resource *res = rcache->res; |
153 | *sid, &srf); | 375 | |
154 | if (unlikely(ret != 0)) { | 376 | rcache->node->first_usage = false; |
155 | DRM_ERROR("Could ot find or use surface 0x%08x " | 377 | if (p_val) |
156 | "address 0x%08lx\n", | 378 | *p_val = rcache->node; |
157 | (unsigned int) *sid, | 379 | |
158 | (unsigned long) sid); | 380 | return vmw_resource_relocation_add |
159 | return ret; | 381 | (&sw_context->res_relocations, res, |
382 | id - sw_context->buf_start); | ||
160 | } | 383 | } |
161 | 384 | ||
162 | ret = vmw_surface_validate(dev_priv, srf); | 385 | ret = vmw_user_resource_lookup_handle(dev_priv, |
386 | sw_context->tfile, | ||
387 | *id, | ||
388 | converter, | ||
389 | &res); | ||
163 | if (unlikely(ret != 0)) { | 390 | if (unlikely(ret != 0)) { |
164 | if (ret != -ERESTARTSYS) | 391 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
165 | DRM_ERROR("Could not validate surface.\n"); | 392 | (unsigned) *id); |
166 | vmw_surface_unreference(&srf); | 393 | dump_stack(); |
167 | return ret; | 394 | return ret; |
168 | } | 395 | } |
169 | 396 | ||
170 | sw_context->last_sid = *sid; | 397 | rcache->valid = true; |
171 | sw_context->sid_valid = true; | 398 | rcache->res = res; |
172 | sw_context->sid_translation = srf->res.id; | 399 | rcache->handle = *id; |
173 | *sid = sw_context->sid_translation; | ||
174 | 400 | ||
175 | res = &srf->res; | 401 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
176 | vmw_resource_to_validate_list(sw_context, &res); | 402 | res, |
403 | id - sw_context->buf_start); | ||
404 | if (unlikely(ret != 0)) | ||
405 | goto out_no_reloc; | ||
406 | |||
407 | ret = vmw_resource_val_add(sw_context, res, &node); | ||
408 | if (unlikely(ret != 0)) | ||
409 | goto out_no_reloc; | ||
177 | 410 | ||
411 | rcache->node = node; | ||
412 | if (p_val) | ||
413 | *p_val = node; | ||
414 | vmw_resource_unreference(&res); | ||
178 | return 0; | 415 | return 0; |
416 | |||
417 | out_no_reloc: | ||
418 | BUG_ON(sw_context->error_resource != NULL); | ||
419 | sw_context->error_resource = res; | ||
420 | |||
421 | return ret; | ||
179 | } | 422 | } |
180 | 423 | ||
424 | /** | ||
425 | * vmw_cmd_cid_check - Check a command header for valid context information. | ||
426 | * | ||
427 | * @dev_priv: Pointer to a device private structure. | ||
428 | * @sw_context: Pointer to the software context. | ||
429 | * @header: A command header with an embedded user-space context handle. | ||
430 | * | ||
431 | * Convenience function: Call vmw_cmd_res_check with the user-space context | ||
432 | * handle embedded in @header. | ||
433 | */ | ||
434 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | ||
435 | struct vmw_sw_context *sw_context, | ||
436 | SVGA3dCmdHeader *header) | ||
437 | { | ||
438 | struct vmw_cid_cmd { | ||
439 | SVGA3dCmdHeader header; | ||
440 | __le32 cid; | ||
441 | } *cmd; | ||
442 | |||
443 | cmd = container_of(header, struct vmw_cid_cmd, header); | ||
444 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
445 | user_context_converter, &cmd->cid, NULL); | ||
446 | } | ||
181 | 447 | ||
182 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | 448 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
183 | struct vmw_sw_context *sw_context, | 449 | struct vmw_sw_context *sw_context, |
@@ -194,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
194 | return ret; | 460 | return ret; |
195 | 461 | ||
196 | cmd = container_of(header, struct vmw_sid_cmd, header); | 462 | cmd = container_of(header, struct vmw_sid_cmd, header); |
197 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); | 463 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
464 | user_surface_converter, | ||
465 | &cmd->body.target.sid, NULL); | ||
198 | return ret; | 466 | return ret; |
199 | } | 467 | } |
200 | 468 | ||
@@ -209,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
209 | int ret; | 477 | int ret; |
210 | 478 | ||
211 | cmd = container_of(header, struct vmw_sid_cmd, header); | 479 | cmd = container_of(header, struct vmw_sid_cmd, header); |
212 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); | 480 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
481 | user_surface_converter, | ||
482 | &cmd->body.src.sid, NULL); | ||
213 | if (unlikely(ret != 0)) | 483 | if (unlikely(ret != 0)) |
214 | return ret; | 484 | return ret; |
215 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); | 485 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
486 | user_surface_converter, | ||
487 | &cmd->body.dest.sid, NULL); | ||
216 | } | 488 | } |
217 | 489 | ||
218 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | 490 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
@@ -226,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
226 | int ret; | 498 | int ret; |
227 | 499 | ||
228 | cmd = container_of(header, struct vmw_sid_cmd, header); | 500 | cmd = container_of(header, struct vmw_sid_cmd, header); |
229 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); | 501 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
502 | user_surface_converter, | ||
503 | &cmd->body.src.sid, NULL); | ||
230 | if (unlikely(ret != 0)) | 504 | if (unlikely(ret != 0)) |
231 | return ret; | 505 | return ret; |
232 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); | 506 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
507 | user_surface_converter, | ||
508 | &cmd->body.dest.sid, NULL); | ||
233 | } | 509 | } |
234 | 510 | ||
235 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | 511 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
@@ -248,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
248 | return -EPERM; | 524 | return -EPERM; |
249 | } | 525 | } |
250 | 526 | ||
251 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); | 527 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
528 | user_surface_converter, | ||
529 | &cmd->body.srcImage.sid, NULL); | ||
252 | } | 530 | } |
253 | 531 | ||
254 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | 532 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
@@ -268,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
268 | return -EPERM; | 546 | return -EPERM; |
269 | } | 547 | } |
270 | 548 | ||
271 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
550 | user_surface_converter, &cmd->body.sid, | ||
551 | NULL); | ||
272 | } | 552 | } |
273 | 553 | ||
274 | /** | 554 | /** |
275 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. | 555 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
276 | * | 556 | * |
277 | * @dev_priv: The device private structure. | 557 | * @dev_priv: The device private structure. |
278 | * @cid: The hardware context for the next query. | ||
279 | * @new_query_bo: The new buffer holding query results. | 558 | * @new_query_bo: The new buffer holding query results. |
280 | * @sw_context: The software context used for this command submission. | 559 | * @sw_context: The software context used for this command submission. |
281 | * | 560 | * |
@@ -283,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
283 | * query results, and if another buffer currently is pinned for query | 562 | * query results, and if another buffer currently is pinned for query |
284 | * results. If so, the function prepares the state of @sw_context for | 563 | * results. If so, the function prepares the state of @sw_context for |
285 | * switching pinned buffers after successful submission of the current | 564 | * switching pinned buffers after successful submission of the current |
286 | * command batch. It also checks whether we're using a new query context. | 565 | * command batch. |
287 | * In that case, it makes sure we emit a query barrier for the old | ||
288 | * context before the current query buffer is fenced. | ||
289 | */ | 566 | */ |
290 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | 567 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
291 | uint32_t cid, | ||
292 | struct ttm_buffer_object *new_query_bo, | 568 | struct ttm_buffer_object *new_query_bo, |
293 | struct vmw_sw_context *sw_context) | 569 | struct vmw_sw_context *sw_context) |
294 | { | 570 | { |
571 | struct vmw_res_cache_entry *ctx_entry = | ||
572 | &sw_context->res_cache[vmw_res_context]; | ||
295 | int ret; | 573 | int ret; |
296 | bool add_cid = false; | 574 | |
297 | uint32_t cid_to_add; | 575 | BUG_ON(!ctx_entry->valid); |
576 | sw_context->last_query_ctx = ctx_entry->res; | ||
298 | 577 | ||
299 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { | 578 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
300 | 579 | ||
@@ -304,9 +583,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
304 | } | 583 | } |
305 | 584 | ||
306 | if (unlikely(sw_context->cur_query_bo != NULL)) { | 585 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
307 | BUG_ON(!sw_context->query_cid_valid); | 586 | sw_context->needs_post_query_barrier = true; |
308 | add_cid = true; | ||
309 | cid_to_add = sw_context->cur_query_cid; | ||
310 | ret = vmw_bo_to_validate_list(sw_context, | 587 | ret = vmw_bo_to_validate_list(sw_context, |
311 | sw_context->cur_query_bo, | 588 | sw_context->cur_query_bo, |
312 | NULL); | 589 | NULL); |
@@ -323,27 +600,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
323 | 600 | ||
324 | } | 601 | } |
325 | 602 | ||
326 | if (unlikely(cid != sw_context->cur_query_cid && | ||
327 | sw_context->query_cid_valid)) { | ||
328 | add_cid = true; | ||
329 | cid_to_add = sw_context->cur_query_cid; | ||
330 | } | ||
331 | |||
332 | sw_context->cur_query_cid = cid; | ||
333 | sw_context->query_cid_valid = true; | ||
334 | |||
335 | if (add_cid) { | ||
336 | struct vmw_resource *ctx = sw_context->cur_ctx; | ||
337 | |||
338 | if (list_empty(&ctx->query_head)) | ||
339 | list_add_tail(&ctx->query_head, | ||
340 | &sw_context->query_list); | ||
341 | ret = vmw_bo_to_validate_list(sw_context, | ||
342 | dev_priv->dummy_query_bo, | ||
343 | NULL); | ||
344 | if (unlikely(ret != 0)) | ||
345 | return ret; | ||
346 | } | ||
347 | return 0; | 603 | return 0; |
348 | } | 604 | } |
349 | 605 | ||
@@ -355,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
355 | * @sw_context: The software context used for this command submission batch. | 611 | * @sw_context: The software context used for this command submission batch. |
356 | * | 612 | * |
357 | * This function will check if we're switching query buffers, and will then, | 613 | * This function will check if we're switching query buffers, and will then, |
358 | * if no other query waits are issued this command submission batch, | ||
359 | * issue a dummy occlusion query wait used as a query barrier. When the fence | 614 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
360 | * object following that query wait has signaled, we are sure that all | 615 | * object following that query wait has signaled, we are sure that all |
361 | * preseding queries have finished, and the old query buffer can be unpinned. | 616 | * preceding queries have finished, and the old query buffer can be unpinned. |
362 | * However, since both the new query buffer and the old one are fenced with | 617 | * However, since both the new query buffer and the old one are fenced with |
363 | * that fence, we can do an asynchronus unpin now, and be sure that the | 618 | * that fence, we can do an asynchronus unpin now, and be sure that the |
364 | * old query buffer won't be moved until the fence has signaled. | 619 | * old query buffer won't be moved until the fence has signaled. |
@@ -369,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
369 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | 624 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
370 | struct vmw_sw_context *sw_context) | 625 | struct vmw_sw_context *sw_context) |
371 | { | 626 | { |
372 | |||
373 | struct vmw_resource *ctx, *next_ctx; | ||
374 | int ret; | ||
375 | |||
376 | /* | 627 | /* |
377 | * The validate list should still hold references to all | 628 | * The validate list should still hold references to all |
378 | * contexts here. | 629 | * contexts here. |
379 | */ | 630 | */ |
380 | 631 | ||
381 | list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, | 632 | if (sw_context->needs_post_query_barrier) { |
382 | query_head) { | 633 | struct vmw_res_cache_entry *ctx_entry = |
383 | list_del_init(&ctx->query_head); | 634 | &sw_context->res_cache[vmw_res_context]; |
635 | struct vmw_resource *ctx; | ||
636 | int ret; | ||
384 | 637 | ||
385 | BUG_ON(list_empty(&ctx->validate_head)); | 638 | BUG_ON(!ctx_entry->valid); |
639 | ctx = ctx_entry->res; | ||
386 | 640 | ||
387 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); | 641 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); |
388 | 642 | ||
@@ -396,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
396 | ttm_bo_unref(&dev_priv->pinned_bo); | 650 | ttm_bo_unref(&dev_priv->pinned_bo); |
397 | } | 651 | } |
398 | 652 | ||
399 | vmw_bo_pin(sw_context->cur_query_bo, true); | 653 | if (!sw_context->needs_post_query_barrier) { |
654 | vmw_bo_pin(sw_context->cur_query_bo, true); | ||
400 | 655 | ||
401 | /* | 656 | /* |
402 | * We pin also the dummy_query_bo buffer so that we | 657 | * We pin also the dummy_query_bo buffer so that we |
403 | * don't need to validate it when emitting | 658 | * don't need to validate it when emitting |
404 | * dummy queries in context destroy paths. | 659 | * dummy queries in context destroy paths. |
405 | */ | 660 | */ |
406 | 661 | ||
407 | vmw_bo_pin(dev_priv->dummy_query_bo, true); | 662 | vmw_bo_pin(dev_priv->dummy_query_bo, true); |
408 | dev_priv->dummy_query_bo_pinned = true; | 663 | dev_priv->dummy_query_bo_pinned = true; |
409 | 664 | ||
410 | dev_priv->query_cid = sw_context->cur_query_cid; | 665 | BUG_ON(sw_context->last_query_ctx == NULL); |
411 | dev_priv->pinned_bo = | 666 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
412 | ttm_bo_reference(sw_context->cur_query_bo); | 667 | dev_priv->query_cid_valid = true; |
668 | dev_priv->pinned_bo = | ||
669 | ttm_bo_reference(sw_context->cur_query_bo); | ||
670 | } | ||
413 | } | 671 | } |
414 | } | 672 | } |
415 | 673 | ||
416 | /** | 674 | /** |
417 | * vmw_query_switch_backoff - clear query barrier list | 675 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
418 | * @sw_context: The sw context used for this submission batch. | 676 | * handle to a valid SVGAGuestPtr |
419 | * | 677 | * |
420 | * This function is used as part of an error path, where a previously | 678 | * @dev_priv: Pointer to a device private structure. |
421 | * set up list of query barriers needs to be cleared. | 679 | * @sw_context: The software context used for this command batch validation. |
680 | * @ptr: Pointer to the user-space handle to be translated. | ||
681 | * @vmw_bo_p: Points to a location that, on successful return will carry | ||
682 | * a reference-counted pointer to the DMA buffer identified by the | ||
683 | * user-space handle in @id. | ||
422 | * | 684 | * |
685 | * This function saves information needed to translate a user-space buffer | ||
686 | * handle to a valid SVGAGuestPtr. The translation does not take place | ||
687 | * immediately, but during a call to vmw_apply_relocations(). | ||
688 | * This function builds a relocation list and a list of buffers to validate. | ||
689 | * The former needs to be freed using either vmw_apply_relocations() or | ||
690 | * vmw_free_relocations(). The latter needs to be freed using | ||
691 | * vmw_clear_validations. | ||
423 | */ | 692 | */ |
424 | static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) | ||
425 | { | ||
426 | struct list_head *list, *next; | ||
427 | |||
428 | list_for_each_safe(list, next, &sw_context->query_list) { | ||
429 | list_del_init(list); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | 693 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
434 | struct vmw_sw_context *sw_context, | 694 | struct vmw_sw_context *sw_context, |
435 | SVGAGuestPtr *ptr, | 695 | SVGAGuestPtr *ptr, |
@@ -471,6 +731,37 @@ out_no_reloc:
471 | return ret; | 731 | return ret; |
472 | } | 732 | } |
473 | 733 | ||
734 | /** | ||
735 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. | ||
736 | * | ||
737 | * @dev_priv: Pointer to a device private struct. | ||
738 | * @sw_context: The software context used for this command submission. | ||
739 | * @header: Pointer to the command header in the command stream. | ||
740 | */ | ||
741 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | ||
742 | struct vmw_sw_context *sw_context, | ||
743 | SVGA3dCmdHeader *header) | ||
744 | { | ||
745 | struct vmw_begin_query_cmd { | ||
746 | SVGA3dCmdHeader header; | ||
747 | SVGA3dCmdBeginQuery q; | ||
748 | } *cmd; | ||
749 | |||
750 | cmd = container_of(header, struct vmw_begin_query_cmd, | ||
751 | header); | ||
752 | |||
753 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
754 | user_context_converter, &cmd->q.cid, | ||
755 | NULL); | ||
756 | } | ||
757 | |||
758 | /** | ||
759 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. | ||
760 | * | ||
761 | * @dev_priv: Pointer to a device private struct. | ||
762 | * @sw_context: The software context used for this command submission. | ||
763 | * @header: Pointer to the command header in the command stream. | ||
764 | */ | ||
474 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | 765 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
475 | struct vmw_sw_context *sw_context, | 766 | struct vmw_sw_context *sw_context, |
476 | SVGA3dCmdHeader *header) | 767 | SVGA3dCmdHeader *header) |
@@ -493,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
493 | if (unlikely(ret != 0)) | 784 | if (unlikely(ret != 0)) |
494 | return ret; | 785 | return ret; |
495 | 786 | ||
496 | ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, | 787 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
497 | &vmw_bo->base, sw_context); | ||
498 | 788 | ||
499 | vmw_dmabuf_unreference(&vmw_bo); | 789 | vmw_dmabuf_unreference(&vmw_bo); |
500 | return ret; | 790 | return ret; |
501 | } | 791 | } |
502 | 792 | ||
793 | /* | ||
794 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. | ||
795 | * | ||
796 | * @dev_priv: Pointer to a device private struct. | ||
797 | * @sw_context: The software context used for this command submission. | ||
798 | * @header: Pointer to the command header in the command stream. | ||
799 | */ | ||
503 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | 800 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
504 | struct vmw_sw_context *sw_context, | 801 | struct vmw_sw_context *sw_context, |
505 | SVGA3dCmdHeader *header) | 802 | SVGA3dCmdHeader *header) |
@@ -510,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
510 | SVGA3dCmdWaitForQuery q; | 807 | SVGA3dCmdWaitForQuery q; |
511 | } *cmd; | 808 | } *cmd; |
512 | int ret; | 809 | int ret; |
513 | struct vmw_resource *ctx; | ||
514 | 810 | ||
515 | cmd = container_of(header, struct vmw_query_cmd, header); | 811 | cmd = container_of(header, struct vmw_query_cmd, header); |
516 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 812 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
@@ -524,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
524 | return ret; | 820 | return ret; |
525 | 821 | ||
526 | vmw_dmabuf_unreference(&vmw_bo); | 822 | vmw_dmabuf_unreference(&vmw_bo); |
527 | |||
528 | /* | ||
529 | * This wait will act as a barrier for previous waits for this | ||
530 | * context. | ||
531 | */ | ||
532 | |||
533 | ctx = sw_context->cur_ctx; | ||
534 | if (!list_empty(&ctx->query_head)) | ||
535 | list_del_init(&ctx->query_head); | ||
536 | |||
537 | return 0; | 823 | return 0; |
538 | } | 824 | } |
539 | 825 | ||
@@ -542,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
542 | SVGA3dCmdHeader *header) | 828 | SVGA3dCmdHeader *header) |
543 | { | 829 | { |
544 | struct vmw_dma_buffer *vmw_bo = NULL; | 830 | struct vmw_dma_buffer *vmw_bo = NULL; |
545 | struct ttm_buffer_object *bo; | ||
546 | struct vmw_surface *srf = NULL; | 831 | struct vmw_surface *srf = NULL; |
547 | struct vmw_dma_cmd { | 832 | struct vmw_dma_cmd { |
548 | SVGA3dCmdHeader header; | 833 | SVGA3dCmdHeader header; |
549 | SVGA3dCmdSurfaceDMA dma; | 834 | SVGA3dCmdSurfaceDMA dma; |
550 | } *cmd; | 835 | } *cmd; |
551 | int ret; | 836 | int ret; |
552 | struct vmw_resource *res; | ||
553 | 837 | ||
554 | cmd = container_of(header, struct vmw_dma_cmd, header); | 838 | cmd = container_of(header, struct vmw_dma_cmd, header); |
555 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 839 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
@@ -558,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
558 | if (unlikely(ret != 0)) | 842 | if (unlikely(ret != 0)) |
559 | return ret; | 843 | return ret; |
560 | 844 | ||
561 | bo = &vmw_bo->base; | 845 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
562 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 846 | user_surface_converter, &cmd->dma.host.sid, |
563 | cmd->dma.host.sid, &srf); | 847 | NULL); |
564 | if (ret) { | ||
565 | DRM_ERROR("could not find surface\n"); | ||
566 | goto out_no_reloc; | ||
567 | } | ||
568 | |||
569 | ret = vmw_surface_validate(dev_priv, srf); | ||
570 | if (unlikely(ret != 0)) { | 848 | if (unlikely(ret != 0)) { |
571 | if (ret != -ERESTARTSYS) | 849 | if (unlikely(ret != -ERESTARTSYS)) |
572 | DRM_ERROR("Culd not validate surface.\n"); | 850 | DRM_ERROR("could not find surface for DMA.\n"); |
573 | goto out_no_validate; | 851 | goto out_no_surface; |
574 | } | 852 | } |
575 | 853 | ||
576 | /* | 854 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
577 | * Patch command stream with device SID. | ||
578 | */ | ||
579 | cmd->dma.host.sid = srf->res.id; | ||
580 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); | ||
581 | |||
582 | vmw_dmabuf_unreference(&vmw_bo); | ||
583 | |||
584 | res = &srf->res; | ||
585 | vmw_resource_to_validate_list(sw_context, &res); | ||
586 | 855 | ||
587 | return 0; | 856 | vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); |
588 | 857 | ||
589 | out_no_validate: | 858 | out_no_surface: |
590 | vmw_surface_unreference(&srf); | ||
591 | out_no_reloc: | ||
592 | vmw_dmabuf_unreference(&vmw_bo); | 859 | vmw_dmabuf_unreference(&vmw_bo); |
593 | return ret; | 860 | return ret; |
594 | } | 861 | } |
@@ -621,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, | |||
621 | } | 888 | } |
622 | 889 | ||
623 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { | 890 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
624 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 891 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
625 | &decl->array.surfaceId); | 892 | user_surface_converter, |
893 | &decl->array.surfaceId, NULL); | ||
626 | if (unlikely(ret != 0)) | 894 | if (unlikely(ret != 0)) |
627 | return ret; | 895 | return ret; |
628 | } | 896 | } |
@@ -636,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, | |||
636 | 904 | ||
637 | range = (SVGA3dPrimitiveRange *) decl; | 905 | range = (SVGA3dPrimitiveRange *) decl; |
638 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { | 906 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
639 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 907 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
640 | &range->indexArray.surfaceId); | 908 | user_surface_converter, |
909 | &range->indexArray.surfaceId, NULL); | ||
641 | if (unlikely(ret != 0)) | 910 | if (unlikely(ret != 0)) |
642 | return ret; | 911 | return ret; |
643 | } | 912 | } |
@@ -668,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
668 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | 937 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
669 | continue; | 938 | continue; |
670 | 939 | ||
671 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 940 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
672 | &cur_state->value); | 941 | user_surface_converter, |
942 | &cur_state->value, NULL); | ||
673 | if (unlikely(ret != 0)) | 943 | if (unlikely(ret != 0)) |
674 | return ret; | 944 | return ret; |
675 | } | 945 | } |
@@ -700,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
700 | return ret; | 970 | return ret; |
701 | } | 971 | } |
702 | 972 | ||
973 | /** | ||
974 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | ||
975 | * command | ||
976 | * | ||
977 | * @dev_priv: Pointer to a device private struct. | ||
978 | * @sw_context: The software context being used for this batch. | ||
979 | * @header: Pointer to the command header in the command stream. | ||
980 | */ | ||
981 | static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | ||
982 | struct vmw_sw_context *sw_context, | ||
983 | SVGA3dCmdHeader *header) | ||
984 | { | ||
985 | struct vmw_set_shader_cmd { | ||
986 | SVGA3dCmdHeader header; | ||
987 | SVGA3dCmdSetShader body; | ||
988 | } *cmd; | ||
989 | int ret; | ||
990 | |||
991 | cmd = container_of(header, struct vmw_set_shader_cmd, | ||
992 | header); | ||
993 | |||
994 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
995 | if (unlikely(ret != 0)) | ||
996 | return ret; | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
703 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1001 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
704 | struct vmw_sw_context *sw_context, | 1002 | struct vmw_sw_context *sw_context, |
705 | void *buf, uint32_t *size) | 1003 | void *buf, uint32_t *size) |
@@ -773,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
773 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | 1071 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), |
774 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | 1072 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), |
775 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 1073 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), |
776 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), | 1074 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), |
777 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 1075 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), |
778 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 1076 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
779 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 1077 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
780 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 1078 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), |
781 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), | 1079 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
782 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), | 1080 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
783 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 1081 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
784 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1082 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
785 | &vmw_cmd_blt_surf_screen_check) | 1083 | &vmw_cmd_blt_surf_screen_check), |
1084 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), | ||
1085 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | ||
1086 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | ||
1087 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | ||
786 | }; | 1088 | }; |
787 | 1089 | ||
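The vmw_cmd_funcs table above maps each SVGA 3D command id to its validator, and ids the kernel refuses to pass through are routed to vmw_cmd_invalid. The standalone sketch below models only that dispatch shape; the opcodes and handler names are invented and do not correspond to the SVGA command set.

/*
 * Standalone sketch of a per-opcode validator table, modelled on
 * vmw_cmd_funcs. Opcodes and handlers are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

enum { CMD_NOP, CMD_QUERY, CMD_MAX };

typedef int (*cmd_func)(const void *body, uint32_t size);

static int cmd_ok(const void *body, uint32_t size)
{
	(void)body; (void)size;
	return 0;			/* accept */
}

static int cmd_invalid(const void *body, uint32_t size)
{
	(void)body; (void)size;
	return -1;			/* reject disallowed commands */
}

static const cmd_func cmd_funcs[CMD_MAX] = {
	[CMD_NOP]   = cmd_ok,
	[CMD_QUERY] = cmd_invalid,
};

static int check_cmd(uint32_t id, const void *body, uint32_t size)
{
	if (id >= CMD_MAX || cmd_funcs[id] == NULL)
		return -1;		/* unknown opcode */
	return cmd_funcs[id](body, size);
}

int main(void)
{
	printf("nop: %d, query: %d, bogus: %d\n",
	       check_cmd(CMD_NOP, NULL, 0),
	       check_cmd(CMD_QUERY, NULL, 0),
	       check_cmd(42, NULL, 0));
	return 0;
}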
788 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 1090 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
@@ -829,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, | |||
829 | int32_t cur_size = size; | 1131 | int32_t cur_size = size; |
830 | int ret; | 1132 | int ret; |
831 | 1133 | ||
1134 | sw_context->buf_start = buf; | ||
1135 | |||
832 | while (cur_size > 0) { | 1136 | while (cur_size > 0) { |
833 | size = cur_size; | 1137 | size = cur_size; |
834 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); | 1138 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); |
@@ -860,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
860 | 1164 | ||
861 | for (i = 0; i < sw_context->cur_reloc; ++i) { | 1165 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
862 | reloc = &sw_context->relocs[i]; | 1166 | reloc = &sw_context->relocs[i]; |
863 | validate = &sw_context->val_bufs[reloc->index]; | 1167 | validate = &sw_context->val_bufs[reloc->index].base; |
864 | bo = validate->bo; | 1168 | bo = validate->bo; |
865 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 1169 | switch (bo->mem.mem_type) { |
1170 | case TTM_PL_VRAM: | ||
866 | reloc->location->offset += bo->offset; | 1171 | reloc->location->offset += bo->offset; |
867 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; | 1172 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; |
868 | } else | 1173 | break; |
1174 | case VMW_PL_GMR: | ||
869 | reloc->location->gmrId = bo->mem.start; | 1175 | reloc->location->gmrId = bo->mem.start; |
1176 | break; | ||
1177 | default: | ||
1178 | BUG(); | ||
1179 | } | ||
870 | } | 1180 | } |
871 | vmw_free_relocations(sw_context); | 1181 | vmw_free_relocations(sw_context); |
872 | } | 1182 | } |
873 | 1183 | ||
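vmw_apply_relocations() patches each recorded SVGAGuestPtr once the final buffer placement is known: a VRAM placement becomes SVGA_GMR_FRAMEBUFFER plus the buffer offset, while a GMR placement simply takes the GMR id from bo->mem.start. The standalone model below mirrors only that decision; the constant and types are stand-ins for the SVGA headers.

/*
 * Standalone model of the fixup in vmw_apply_relocations(): patch a
 * guest pointer according to where the buffer ended up.
 */
#include <stdint.h>
#include <stdio.h>

#define GMR_FRAMEBUFFER 0xffffffffu	/* stand-in for SVGA_GMR_FRAMEBUFFER */

enum placement { PL_VRAM, PL_GMR };

struct guest_ptr {
	uint32_t gmr_id;
	uint32_t offset;
};

static void apply_reloc(struct guest_ptr *loc, enum placement mem_type,
			uint32_t bo_offset, uint32_t gmr_id)
{
	switch (mem_type) {
	case PL_VRAM:
		loc->offset += bo_offset;	/* offset from start of VRAM */
		loc->gmr_id = GMR_FRAMEBUFFER;
		break;
	case PL_GMR:
		loc->gmr_id = gmr_id;		/* GMR id identifies the buffer */
		break;
	}
}

int main(void)
{
	struct guest_ptr p = { .gmr_id = 0, .offset = 0x100 };

	apply_reloc(&p, PL_VRAM, 0x10000, 0);
	printf("gmr %u offset 0x%x\n", (unsigned) p.gmr_id, (unsigned) p.offset);
	return 0;
}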
1184 | /** | ||
1185 | * vmw_resource_list_unreference - Free up a resource list and unreference | ||
1186 | * all resources referenced by it. | ||
1187 | * | ||
1188 | * @list: The resource list. | ||
1189 | */ | ||
1190 | static void vmw_resource_list_unreference(struct list_head *list) | ||
1191 | { | ||
1192 | struct vmw_resource_val_node *val, *val_next; | ||
1193 | |||
1194 | /* | ||
1195 | * Drop references to resources held during command submission. | ||
1196 | */ | ||
1197 | |||
1198 | list_for_each_entry_safe(val, val_next, list, head) { | ||
1199 | list_del_init(&val->head); | ||
1200 | vmw_resource_unreference(&val->res); | ||
1201 | kfree(val); | ||
1202 | } | ||
1203 | } | ||
1204 | |||
874 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | 1205 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
875 | { | 1206 | { |
876 | struct ttm_validate_buffer *entry, *next; | 1207 | struct vmw_validate_buffer *entry, *next; |
877 | struct vmw_resource *res, *res_next; | 1208 | struct vmw_resource_val_node *val; |
878 | 1209 | ||
879 | /* | 1210 | /* |
880 | * Drop references to DMA buffers held during command submission. | 1211 | * Drop references to DMA buffers held during command submission. |
881 | */ | 1212 | */ |
882 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | 1213 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
883 | head) { | 1214 | base.head) { |
884 | list_del(&entry->head); | 1215 | list_del(&entry->base.head); |
885 | vmw_dmabuf_validate_clear(entry->bo); | 1216 | ttm_bo_unref(&entry->base.bo); |
886 | ttm_bo_unref(&entry->bo); | 1217 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); |
887 | sw_context->cur_val_buf--; | 1218 | sw_context->cur_val_buf--; |
888 | } | 1219 | } |
889 | BUG_ON(sw_context->cur_val_buf != 0); | 1220 | BUG_ON(sw_context->cur_val_buf != 0); |
890 | 1221 | ||
891 | /* | 1222 | list_for_each_entry(val, &sw_context->resource_list, head) |
892 | * Drop references to resources held during command submission. | 1223 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); |
893 | */ | ||
894 | vmw_resource_unreserve(&sw_context->resource_list); | ||
895 | list_for_each_entry_safe(res, res_next, &sw_context->resource_list, | ||
896 | validate_head) { | ||
897 | list_del_init(&res->validate_head); | ||
898 | vmw_resource_unreference(&res); | ||
899 | } | ||
900 | } | 1224 | } |
901 | 1225 | ||
902 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 1226 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
@@ -939,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
939 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 1263 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
940 | struct vmw_sw_context *sw_context) | 1264 | struct vmw_sw_context *sw_context) |
941 | { | 1265 | { |
942 | struct ttm_validate_buffer *entry; | 1266 | struct vmw_validate_buffer *entry; |
943 | int ret; | 1267 | int ret; |
944 | 1268 | ||
945 | list_for_each_entry(entry, &sw_context->validate_nodes, head) { | 1269 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
946 | ret = vmw_validate_single_buffer(dev_priv, entry->bo); | 1270 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); |
947 | if (unlikely(ret != 0)) | 1271 | if (unlikely(ret != 0)) |
948 | return ret; | 1272 | return ret; |
949 | } | 1273 | } |
@@ -1106,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1106 | { | 1430 | { |
1107 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 1431 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
1108 | struct vmw_fence_obj *fence = NULL; | 1432 | struct vmw_fence_obj *fence = NULL; |
1433 | struct vmw_resource *error_resource; | ||
1434 | struct list_head resource_list; | ||
1109 | uint32_t handle; | 1435 | uint32_t handle; |
1110 | void *cmd; | 1436 | void *cmd; |
1111 | int ret; | 1437 | int ret; |
@@ -1135,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1135 | sw_context->kernel = true; | 1461 | sw_context->kernel = true; |
1136 | 1462 | ||
1137 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 1463 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; |
1138 | sw_context->cid_valid = false; | ||
1139 | sw_context->sid_valid = false; | ||
1140 | sw_context->cur_reloc = 0; | 1464 | sw_context->cur_reloc = 0; |
1141 | sw_context->cur_val_buf = 0; | 1465 | sw_context->cur_val_buf = 0; |
1142 | sw_context->fence_flags = 0; | 1466 | sw_context->fence_flags = 0; |
1143 | INIT_LIST_HEAD(&sw_context->query_list); | ||
1144 | INIT_LIST_HEAD(&sw_context->resource_list); | 1467 | INIT_LIST_HEAD(&sw_context->resource_list); |
1145 | sw_context->cur_query_bo = dev_priv->pinned_bo; | 1468 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
1146 | sw_context->cur_query_cid = dev_priv->query_cid; | 1469 | sw_context->last_query_ctx = NULL; |
1147 | sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); | 1470 | sw_context->needs_post_query_barrier = false; |
1148 | 1471 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); | |
1149 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 1472 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
1473 | INIT_LIST_HEAD(&sw_context->res_relocations); | ||
1474 | if (!sw_context->res_ht_initialized) { | ||
1475 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); | ||
1476 | if (unlikely(ret != 0)) | ||
1477 | goto out_unlock; | ||
1478 | sw_context->res_ht_initialized = true; | ||
1479 | } | ||
1150 | 1480 | ||
1481 | INIT_LIST_HEAD(&resource_list); | ||
1151 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 1482 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
1152 | command_size); | 1483 | command_size); |
1153 | if (unlikely(ret != 0)) | 1484 | if (unlikely(ret != 0)) |
1154 | goto out_err; | 1485 | goto out_err; |
1155 | 1486 | ||
1487 | ret = vmw_resources_reserve(sw_context); | ||
1488 | if (unlikely(ret != 0)) | ||
1489 | goto out_err; | ||
1490 | |||
1156 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); | 1491 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
1157 | if (unlikely(ret != 0)) | 1492 | if (unlikely(ret != 0)) |
1158 | goto out_err; | 1493 | goto out_err; |
@@ -1161,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1161 | if (unlikely(ret != 0)) | 1496 | if (unlikely(ret != 0)) |
1162 | goto out_err; | 1497 | goto out_err; |
1163 | 1498 | ||
1164 | vmw_apply_relocations(sw_context); | 1499 | ret = vmw_resources_validate(sw_context); |
1500 | if (unlikely(ret != 0)) | ||
1501 | goto out_err; | ||
1165 | 1502 | ||
1166 | if (throttle_us) { | 1503 | if (throttle_us) { |
1167 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, | 1504 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
1168 | throttle_us); | 1505 | throttle_us); |
1169 | 1506 | ||
1170 | if (unlikely(ret != 0)) | 1507 | if (unlikely(ret != 0)) |
1171 | goto out_throttle; | 1508 | goto out_err; |
1172 | } | 1509 | } |
1173 | 1510 | ||
1174 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 1511 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
1175 | if (unlikely(cmd == NULL)) { | 1512 | if (unlikely(cmd == NULL)) { |
1176 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 1513 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
1177 | ret = -ENOMEM; | 1514 | ret = -ENOMEM; |
1178 | goto out_throttle; | 1515 | goto out_err; |
1179 | } | 1516 | } |
1180 | 1517 | ||
1518 | vmw_apply_relocations(sw_context); | ||
1181 | memcpy(cmd, kernel_commands, command_size); | 1519 | memcpy(cmd, kernel_commands, command_size); |
1520 | |||
1521 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); | ||
1522 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
1523 | |||
1182 | vmw_fifo_commit(dev_priv, command_size); | 1524 | vmw_fifo_commit(dev_priv, command_size); |
1183 | 1525 | ||
1184 | vmw_query_bo_switch_commit(dev_priv, sw_context); | 1526 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
@@ -1194,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1194 | if (ret != 0) | 1536 | if (ret != 0) |
1195 | DRM_ERROR("Fence submission error. Syncing.\n"); | 1537 | DRM_ERROR("Fence submission error. Syncing.\n"); |
1196 | 1538 | ||
1539 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | ||
1197 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, | 1540 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, |
1198 | (void *) fence); | 1541 | (void *) fence); |
1199 | 1542 | ||
1543 | if (unlikely(dev_priv->pinned_bo != NULL && | ||
1544 | !dev_priv->query_cid_valid)) | ||
1545 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); | ||
1546 | |||
1200 | vmw_clear_validations(sw_context); | 1547 | vmw_clear_validations(sw_context); |
1201 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, | 1548 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
1202 | user_fence_rep, fence, handle); | 1549 | user_fence_rep, fence, handle); |
@@ -1209,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1209 | vmw_fence_obj_unreference(&fence); | 1556 | vmw_fence_obj_unreference(&fence); |
1210 | } | 1557 | } |
1211 | 1558 | ||
1559 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
1212 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1560 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1561 | |||
1562 | /* | ||
1563 | * Unreference resources outside of the cmdbuf_mutex to | ||
1564 | * avoid deadlocks in resource destruction paths. | ||
1565 | */ | ||
1566 | vmw_resource_list_unreference(&resource_list); | ||
1567 | |||
1213 | return 0; | 1568 | return 0; |
1214 | 1569 | ||
1215 | out_err: | 1570 | out_err: |
1571 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
1216 | vmw_free_relocations(sw_context); | 1572 | vmw_free_relocations(sw_context); |
1217 | out_throttle: | ||
1218 | vmw_query_switch_backoff(sw_context); | ||
1219 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); | 1573 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); |
1574 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | ||
1220 | vmw_clear_validations(sw_context); | 1575 | vmw_clear_validations(sw_context); |
1576 | if (unlikely(dev_priv->pinned_bo != NULL && | ||
1577 | !dev_priv->query_cid_valid)) | ||
1578 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
1221 | out_unlock: | 1579 | out_unlock: |
1580 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
1581 | error_resource = sw_context->error_resource; | ||
1582 | sw_context->error_resource = NULL; | ||
1222 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1583 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1584 | |||
1585 | /* | ||
1586 | * Unreference resources outside of the cmdbuf_mutex to | ||
1587 | * avoid deadlocks in resource destruction paths. | ||
1588 | */ | ||
1589 | vmw_resource_list_unreference(&resource_list); | ||
1590 | if (unlikely(error_resource != NULL)) | ||
1591 | vmw_resource_unreference(&error_resource); | ||
1592 | |||
1223 | return ret; | 1593 | return ret; |
1224 | } | 1594 | } |
1225 | 1595 | ||
@@ -1244,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | |||
1244 | 1614 | ||
1245 | 1615 | ||
1246 | /** | 1616 | /** |
1247 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | 1617 | * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
1248 | * query bo. | 1618 | * query bo. |
1249 | * | 1619 | * |
1250 | * @dev_priv: The device private structure. | 1620 | * @dev_priv: The device private structure. |
1251 | * @only_on_cid_match: Only flush and unpin if the current active query cid | 1621 | * @fence: If non-NULL should point to a struct vmw_fence_obj issued |
1252 | * matches @cid. | 1622 | * _after_ a query barrier that flushes all queries touching the current |
1253 | * @cid: Optional context id to match. | 1623 | * buffer pointed to by @dev_priv->pinned_bo |
1254 | * | 1624 | * |
1255 | * This function should be used to unpin the pinned query bo, or | 1625 | * This function should be used to unpin the pinned query bo, or |
1256 | * as a query barrier when we need to make sure that all queries have | 1626 | * as a query barrier when we need to make sure that all queries have |
@@ -1263,23 +1633,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | |||
1263 | * | 1633 | * |
1264 | * The function will synchronize on the previous query barrier, and will | 1634 | * The function will synchronize on the previous query barrier, and will |
1265 | * thus not finish until that barrier has executed. | 1635 | * thus not finish until that barrier has executed. |
1636 | * | ||
1637 | * The @dev_priv->cmdbuf_mutex needs to be held by the current thread | ||
1638 | * before calling this function. | ||
1266 | */ | 1639 | */ |
1267 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 1640 | void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
1268 | bool only_on_cid_match, uint32_t cid) | 1641 | struct vmw_fence_obj *fence) |
1269 | { | 1642 | { |
1270 | int ret = 0; | 1643 | int ret = 0; |
1271 | struct list_head validate_list; | 1644 | struct list_head validate_list; |
1272 | struct ttm_validate_buffer pinned_val, query_val; | 1645 | struct ttm_validate_buffer pinned_val, query_val; |
1273 | struct vmw_fence_obj *fence; | 1646 | struct vmw_fence_obj *lfence = NULL; |
1274 | |||
1275 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1276 | 1647 | ||
1277 | if (dev_priv->pinned_bo == NULL) | 1648 | if (dev_priv->pinned_bo == NULL) |
1278 | goto out_unlock; | 1649 | goto out_unlock; |
1279 | 1650 | ||
1280 | if (only_on_cid_match && cid != dev_priv->query_cid) | ||
1281 | goto out_unlock; | ||
1282 | |||
1283 | INIT_LIST_HEAD(&validate_list); | 1651 | INIT_LIST_HEAD(&validate_list); |
1284 | 1652 | ||
1285 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); | 1653 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); |
@@ -1297,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
1297 | goto out_no_reserve; | 1665 | goto out_no_reserve; |
1298 | } | 1666 | } |
1299 | 1667 | ||
1300 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | 1668 | if (dev_priv->query_cid_valid) { |
1301 | if (unlikely(ret != 0)) { | 1669 | BUG_ON(fence != NULL); |
1302 | vmw_execbuf_unpin_panic(dev_priv); | 1670 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
1303 | goto out_no_emit; | 1671 | if (unlikely(ret != 0)) { |
1672 | vmw_execbuf_unpin_panic(dev_priv); | ||
1673 | goto out_no_emit; | ||
1674 | } | ||
1675 | dev_priv->query_cid_valid = false; | ||
1304 | } | 1676 | } |
1305 | 1677 | ||
1306 | vmw_bo_pin(dev_priv->pinned_bo, false); | 1678 | vmw_bo_pin(dev_priv->pinned_bo, false); |
1307 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | 1679 | vmw_bo_pin(dev_priv->dummy_query_bo, false); |
1308 | dev_priv->dummy_query_bo_pinned = false; | 1680 | dev_priv->dummy_query_bo_pinned = false; |
1309 | 1681 | ||
1310 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | 1682 | if (fence == NULL) { |
1683 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, | ||
1684 | NULL); | ||
1685 | fence = lfence; | ||
1686 | } | ||
1311 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); | 1687 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); |
1688 | if (lfence != NULL) | ||
1689 | vmw_fence_obj_unreference(&lfence); | ||
1312 | 1690 | ||
1313 | ttm_bo_unref(&query_val.bo); | 1691 | ttm_bo_unref(&query_val.bo); |
1314 | ttm_bo_unref(&pinned_val.bo); | 1692 | ttm_bo_unref(&pinned_val.bo); |
1315 | ttm_bo_unref(&dev_priv->pinned_bo); | 1693 | ttm_bo_unref(&dev_priv->pinned_bo); |
1316 | 1694 | ||
1317 | out_unlock: | 1695 | out_unlock: |
1318 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1319 | return; | 1696 | return; |
1320 | 1697 | ||
1321 | out_no_emit: | 1698 | out_no_emit: |
@@ -1324,6 +1701,31 @@ out_no_reserve: | |||
1324 | ttm_bo_unref(&query_val.bo); | 1701 | ttm_bo_unref(&query_val.bo); |
1325 | ttm_bo_unref(&pinned_val.bo); | 1702 | ttm_bo_unref(&pinned_val.bo); |
1326 | ttm_bo_unref(&dev_priv->pinned_bo); | 1703 | ttm_bo_unref(&dev_priv->pinned_bo); |
1704 | } | ||
1705 | |||
1706 | /** | ||
1707 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | ||
1708 | * query bo. | ||
1709 | * | ||
1710 | * @dev_priv: The device private structure. | ||
1711 | * | ||
1712 | * This function should be used to unpin the pinned query bo, or | ||
1713 | * as a query barrier when we need to make sure that all queries have | ||
1714 | * finished before the next fifo command. (For example on hardware | ||
1715 | * context destructions where the hardware may otherwise leak unfinished | ||
1716 | * queries). | ||
1717 | * | ||
1718 | * This function does not return any failure codes, but makes attempts | ||
1719 | * to do safe unpinning in case of errors. | ||
1720 | * | ||
1721 | * The function will synchronize on the previous query barrier, and will | ||
1722 | * thus not finish until that barrier has executed. | ||
1723 | */ | ||
1724 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) | ||
1725 | { | ||
1726 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1727 | if (dev_priv->query_cid_valid) | ||
1728 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
1327 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1729 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1328 | } | 1730 | } |
1329 | 1731 | ||
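With this split, vmw_execbuf_release_pinned_bo() becomes a plain locking wrapper around __vmw_execbuf_release_pinned_bo(), which assumes cmdbuf_mutex is already held so the execbuf path can call it under the lock it already owns. A small userspace sketch of that locked/__unlocked convention, with a pthread mutex standing in for cmdbuf_mutex:

/*
 * Userspace sketch of the locked/__unlocked split: the double-underscore
 * variant assumes the mutex is held, the public variant takes it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmdbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pinned;			/* stand-in for dev_priv->pinned_bo */

/* Caller must hold cmdbuf_mutex. */
static void __release_pinned(void)
{
	if (pinned) {
		pinned = 0;
		printf("pinned buffer released\n");
	}
}

static void release_pinned(void)
{
	pthread_mutex_lock(&cmdbuf_mutex);
	__release_pinned();
	pthread_mutex_unlock(&cmdbuf_mutex);
}

int main(void)
{
	pinned = 1;
	release_pinned();		/* public entry point takes the lock */
	return 0;
}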
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index b07ca2e4d04b..2f7c08ebf568 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -131,6 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, | |||
131 | struct drm_vmw_rect *clips = NULL; | 131 | struct drm_vmw_rect *clips = NULL; |
132 | struct drm_mode_object *obj; | 132 | struct drm_mode_object *obj; |
133 | struct vmw_framebuffer *vfb; | 133 | struct vmw_framebuffer *vfb; |
134 | struct vmw_resource *res; | ||
134 | uint32_t num_clips; | 135 | uint32_t num_clips; |
135 | int ret; | 136 | int ret; |
136 | 137 | ||
@@ -178,11 +179,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, | |||
178 | if (unlikely(ret != 0)) | 179 | if (unlikely(ret != 0)) |
179 | goto out_no_ttm_lock; | 180 | goto out_no_ttm_lock; |
180 | 181 | ||
181 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, | 182 | ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid, |
182 | &surface); | 183 | user_surface_converter, |
184 | &res); | ||
183 | if (ret) | 185 | if (ret) |
184 | goto out_no_surface; | 186 | goto out_no_surface; |
185 | 187 | ||
188 | surface = vmw_res_to_srf(res); | ||
186 | ret = vmw_kms_present(dev_priv, file_priv, | 189 | ret = vmw_kms_present(dev_priv, file_priv, |
187 | vfb, surface, arg->sid, | 190 | vfb, surface, arg->sid, |
188 | arg->dest_x, arg->dest_y, | 191 | arg->dest_x, arg->dest_y, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 292c988c54ea..44ac46bb5629 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -31,15 +31,47 @@ | |||
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | 33 | ||
34 | struct vmw_user_context { | 34 | /** |
35 | struct ttm_base_object base; | 35 | * struct vmw_user_resource_conv - Identify a derived user-exported resource |
36 | struct vmw_resource res; | 36 | * type and provide a function to convert its ttm_base_object pointer to |
37 | * a struct vmw_resource | ||
38 | */ | ||
39 | struct vmw_user_resource_conv { | ||
40 | enum ttm_object_type object_type; | ||
41 | struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base); | ||
42 | void (*res_free) (struct vmw_resource *res); | ||
37 | }; | 43 | }; |
38 | 44 | ||
39 | struct vmw_user_surface { | 45 | /** |
40 | struct ttm_base_object base; | 46 | * struct vmw_res_func - members and functions common for a resource type |
41 | struct vmw_surface srf; | 47 | * |
42 | uint32_t size; | 48 | * @res_type: Enum that identifies the lru list to use for eviction. |
49 | * @needs_backup: Whether the resource is guest-backed and needs | ||
50 | * persistent buffer storage. | ||
51 | * @type_name: String that identifies the resource type. | ||
52 | * @backup_placement: TTM placement for backup buffers. | ||
53 | * @may_evict: Whether the resource may be evicted. | ||
54 | * @create: Create a hardware resource. | ||
55 | * @destroy: Destroy a hardware resource. | ||
56 | * @bind: Bind a hardware resource to persistent buffer storage. | ||
57 | * @unbind: Unbind a hardware resource from persistent | ||
58 | * buffer storage. | ||
59 | */ | ||
60 | |||
61 | struct vmw_res_func { | ||
62 | enum vmw_res_type res_type; | ||
63 | bool needs_backup; | ||
64 | const char *type_name; | ||
65 | struct ttm_placement *backup_placement; | ||
66 | bool may_evict; | ||
67 | |||
68 | int (*create) (struct vmw_resource *res); | ||
69 | int (*destroy) (struct vmw_resource *res); | ||
70 | int (*bind) (struct vmw_resource *res, | ||
71 | struct ttm_validate_buffer *val_buf); | ||
72 | int (*unbind) (struct vmw_resource *res, | ||
73 | bool readback, | ||
74 | struct ttm_validate_buffer *val_buf); | ||
43 | }; | 75 | }; |
44 | 76 | ||
45 | struct vmw_user_dma_buffer { | 77 | struct vmw_user_dma_buffer { |
@@ -62,16 +94,118 @@ struct vmw_user_stream { | |||
62 | struct vmw_stream stream; | 94 | struct vmw_stream stream; |
63 | }; | 95 | }; |
64 | 96 | ||
97 | |||
98 | static uint64_t vmw_user_stream_size; | ||
99 | |||
100 | static const struct vmw_res_func vmw_stream_func = { | ||
101 | .res_type = vmw_res_stream, | ||
102 | .needs_backup = false, | ||
103 | .may_evict = false, | ||
104 | .type_name = "video streams", | ||
105 | .backup_placement = NULL, | ||
106 | .create = NULL, | ||
107 | .destroy = NULL, | ||
108 | .bind = NULL, | ||
109 | .unbind = NULL | ||
110 | }; | ||
111 | |||
112 | struct vmw_user_context { | ||
113 | struct ttm_base_object base; | ||
114 | struct vmw_resource res; | ||
115 | }; | ||
116 | |||
117 | static void vmw_user_context_free(struct vmw_resource *res); | ||
118 | static struct vmw_resource * | ||
119 | vmw_user_context_base_to_res(struct ttm_base_object *base); | ||
120 | |||
121 | static uint64_t vmw_user_context_size; | ||
122 | |||
123 | static const struct vmw_user_resource_conv user_context_conv = { | ||
124 | .object_type = VMW_RES_CONTEXT, | ||
125 | .base_obj_to_res = vmw_user_context_base_to_res, | ||
126 | .res_free = vmw_user_context_free | ||
127 | }; | ||
128 | |||
129 | const struct vmw_user_resource_conv *user_context_converter = | ||
130 | &user_context_conv; | ||
131 | |||
132 | |||
133 | static const struct vmw_res_func vmw_legacy_context_func = { | ||
134 | .res_type = vmw_res_context, | ||
135 | .needs_backup = false, | ||
136 | .may_evict = false, | ||
137 | .type_name = "legacy contexts", | ||
138 | .backup_placement = NULL, | ||
139 | .create = NULL, | ||
140 | .destroy = NULL, | ||
141 | .bind = NULL, | ||
142 | .unbind = NULL | ||
143 | }; | ||
144 | |||
145 | |||
146 | /** | ||
147 | * struct vmw_user_surface - User-space visible surface resource | ||
148 | * | ||
149 | * @base: The TTM base object handling user-space visibility. | ||
150 | * @srf: The surface metadata. | ||
151 | * @size: TTM accounting size for the surface. | ||
152 | */ | ||
153 | struct vmw_user_surface { | ||
154 | struct ttm_base_object base; | ||
155 | struct vmw_surface srf; | ||
156 | uint32_t size; | ||
157 | uint32_t backup_handle; | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * struct vmw_surface_offset - Backing store mip level offset info | ||
162 | * | ||
163 | * @face: Surface face. | ||
164 | * @mip: Mip level. | ||
165 | * @bo_offset: Offset into backing store of this mip level. | ||
166 | * | ||
167 | */ | ||
65 | struct vmw_surface_offset { | 168 | struct vmw_surface_offset { |
66 | uint32_t face; | 169 | uint32_t face; |
67 | uint32_t mip; | 170 | uint32_t mip; |
68 | uint32_t bo_offset; | 171 | uint32_t bo_offset; |
69 | }; | 172 | }; |
70 | 173 | ||
174 | static void vmw_user_surface_free(struct vmw_resource *res); | ||
175 | static struct vmw_resource * | ||
176 | vmw_user_surface_base_to_res(struct ttm_base_object *base); | ||
177 | static int vmw_legacy_srf_bind(struct vmw_resource *res, | ||
178 | struct ttm_validate_buffer *val_buf); | ||
179 | static int vmw_legacy_srf_unbind(struct vmw_resource *res, | ||
180 | bool readback, | ||
181 | struct ttm_validate_buffer *val_buf); | ||
182 | static int vmw_legacy_srf_create(struct vmw_resource *res); | ||
183 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | ||
184 | |||
185 | static const struct vmw_user_resource_conv user_surface_conv = { | ||
186 | .object_type = VMW_RES_SURFACE, | ||
187 | .base_obj_to_res = vmw_user_surface_base_to_res, | ||
188 | .res_free = vmw_user_surface_free | ||
189 | }; | ||
190 | |||
191 | const struct vmw_user_resource_conv *user_surface_converter = | ||
192 | &user_surface_conv; | ||
193 | |||
71 | 194 | ||
72 | static uint64_t vmw_user_context_size; | ||
73 | static uint64_t vmw_user_surface_size; | 195 | static uint64_t vmw_user_surface_size; |
74 | static uint64_t vmw_user_stream_size; | 196 | |
197 | static const struct vmw_res_func vmw_legacy_surface_func = { | ||
198 | .res_type = vmw_res_surface, | ||
199 | .needs_backup = false, | ||
200 | .may_evict = true, | ||
201 | .type_name = "legacy surfaces", | ||
202 | .backup_placement = &vmw_srf_placement, | ||
203 | .create = &vmw_legacy_srf_create, | ||
204 | .destroy = &vmw_legacy_srf_destroy, | ||
205 | .bind = &vmw_legacy_srf_bind, | ||
206 | .unbind = &vmw_legacy_srf_unbind | ||
207 | }; | ||
208 | |||
75 | 209 | ||
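struct vmw_res_func gathers the per-type operations and eviction policy so the generic resource code can drive any type through res->func, as the vmw_stream_func, vmw_legacy_context_func and vmw_legacy_surface_func instances above show. The sketch below illustrates only the ops-table idea; the type and function names are invented.

/*
 * Standalone sketch of a per-type ops table in the shape of
 * struct vmw_res_func. Names are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct res;

struct res_func {
	const char *type_name;
	bool may_evict;
	int (*create)(struct res *res);
	int (*destroy)(struct res *res);
};

struct res {
	const struct res_func *func;
	int id;
};

static int dummy_create(struct res *res)
{
	res->id = 1;			/* pretend we got a device id */
	return 0;
}

static int dummy_destroy(struct res *res)
{
	res->id = -1;
	return 0;
}

static const struct res_func dummy_func = {
	.type_name = "dummy resources",
	.may_evict = true,
	.create = dummy_create,
	.destroy = dummy_destroy,
};

int main(void)
{
	struct res r = { .func = &dummy_func, .id = -1 };

	/* Generic code only goes through the ops table. */
	r.func->create(&r);
	printf("%s: id %d\n", r.func->type_name, r.id);
	r.func->destroy(&r);
	return 0;
}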
76 | static inline struct vmw_dma_buffer * | 210 | static inline struct vmw_dma_buffer * |
77 | vmw_dma_buffer(struct ttm_buffer_object *bo) | 211 | vmw_dma_buffer(struct ttm_buffer_object *bo) |
@@ -103,10 +237,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
103 | static void vmw_resource_release_id(struct vmw_resource *res) | 237 | static void vmw_resource_release_id(struct vmw_resource *res) |
104 | { | 238 | { |
105 | struct vmw_private *dev_priv = res->dev_priv; | 239 | struct vmw_private *dev_priv = res->dev_priv; |
240 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; | ||
106 | 241 | ||
107 | write_lock(&dev_priv->resource_lock); | 242 | write_lock(&dev_priv->resource_lock); |
108 | if (res->id != -1) | 243 | if (res->id != -1) |
109 | idr_remove(res->idr, res->id); | 244 | idr_remove(idr, res->id); |
110 | res->id = -1; | 245 | res->id = -1; |
111 | write_unlock(&dev_priv->resource_lock); | 246 | write_unlock(&dev_priv->resource_lock); |
112 | } | 247 | } |
@@ -116,17 +251,33 @@ static void vmw_resource_release(struct kref *kref) | |||
116 | struct vmw_resource *res = | 251 | struct vmw_resource *res = |
117 | container_of(kref, struct vmw_resource, kref); | 252 | container_of(kref, struct vmw_resource, kref); |
118 | struct vmw_private *dev_priv = res->dev_priv; | 253 | struct vmw_private *dev_priv = res->dev_priv; |
119 | int id = res->id; | 254 | int id; |
120 | struct idr *idr = res->idr; | 255 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; |
121 | 256 | ||
122 | res->avail = false; | 257 | res->avail = false; |
123 | if (res->remove_from_lists != NULL) | 258 | list_del_init(&res->lru_head); |
124 | res->remove_from_lists(res); | ||
125 | write_unlock(&dev_priv->resource_lock); | 259 | write_unlock(&dev_priv->resource_lock); |
260 | if (res->backup) { | ||
261 | struct ttm_buffer_object *bo = &res->backup->base; | ||
262 | |||
263 | ttm_bo_reserve(bo, false, false, false, 0); | ||
264 | if (!list_empty(&res->mob_head) && | ||
265 | res->func->unbind != NULL) { | ||
266 | struct ttm_validate_buffer val_buf; | ||
267 | |||
268 | val_buf.bo = bo; | ||
269 | res->func->unbind(res, false, &val_buf); | ||
270 | } | ||
271 | res->backup_dirty = false; | ||
272 | list_del_init(&res->mob_head); | ||
273 | ttm_bo_unreserve(bo); | ||
274 | vmw_dmabuf_unreference(&res->backup); | ||
275 | } | ||
126 | 276 | ||
127 | if (likely(res->hw_destroy != NULL)) | 277 | if (likely(res->hw_destroy != NULL)) |
128 | res->hw_destroy(res); | 278 | res->hw_destroy(res); |
129 | 279 | ||
280 | id = res->id; | ||
130 | if (res->res_free != NULL) | 281 | if (res->res_free != NULL) |
131 | res->res_free(res); | 282 | res->res_free(res); |
132 | else | 283 | else |
@@ -153,25 +304,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res) | |||
153 | /** | 304 | /** |
154 | * vmw_resource_alloc_id - release a resource id to the id manager. | 305 | * vmw_resource_alloc_id - release a resource id to the id manager. |
155 | * | 306 | * |
156 | * @dev_priv: Pointer to the device private structure. | ||
157 | * @res: Pointer to the resource. | 307 | * @res: Pointer to the resource. |
158 | * | 308 | * |
159 | * Allocate the lowest free resource from the resource manager, and set | 309 | * Allocate the lowest free resource from the resource manager, and set |
160 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. | 310 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. |
161 | */ | 311 | */ |
162 | static int vmw_resource_alloc_id(struct vmw_private *dev_priv, | 312 | static int vmw_resource_alloc_id(struct vmw_resource *res) |
163 | struct vmw_resource *res) | ||
164 | { | 313 | { |
314 | struct vmw_private *dev_priv = res->dev_priv; | ||
165 | int ret; | 315 | int ret; |
316 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; | ||
166 | 317 | ||
167 | BUG_ON(res->id != -1); | 318 | BUG_ON(res->id != -1); |
168 | 319 | ||
169 | do { | 320 | do { |
170 | if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) | 321 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) |
171 | return -ENOMEM; | 322 | return -ENOMEM; |
172 | 323 | ||
173 | write_lock(&dev_priv->resource_lock); | 324 | write_lock(&dev_priv->resource_lock); |
174 | ret = idr_get_new_above(res->idr, res, 1, &res->id); | 325 | ret = idr_get_new_above(idr, res, 1, &res->id); |
175 | write_unlock(&dev_priv->resource_lock); | 326 | write_unlock(&dev_priv->resource_lock); |
176 | 327 | ||
177 | } while (ret == -EAGAIN); | 328 | } while (ret == -EAGAIN); |
@@ -179,31 +330,40 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv, | |||
179 | return ret; | 330 | return ret; |
180 | } | 331 | } |
181 | 332 | ||
182 | 333 | /** | |
334 | * vmw_resource_init - initialize a struct vmw_resource | ||
335 | * | ||
336 | * @dev_priv: Pointer to a device private struct. | ||
337 | * @res: The struct vmw_resource to initialize. | ||
338 | * @obj_type: Resource object type. | ||
339 | * @delay_id: Boolean whether to defer device id allocation until | ||
340 | * the first validation. | ||
341 | * @res_free: Resource destructor. | ||
342 | * @func: Resource function table. | ||
343 | */ | ||
183 | static int vmw_resource_init(struct vmw_private *dev_priv, | 344 | static int vmw_resource_init(struct vmw_private *dev_priv, |
184 | struct vmw_resource *res, | 345 | struct vmw_resource *res, |
185 | struct idr *idr, | ||
186 | enum ttm_object_type obj_type, | ||
187 | bool delay_id, | 346 | bool delay_id, |
188 | void (*res_free) (struct vmw_resource *res), | 347 | void (*res_free) (struct vmw_resource *res), |
189 | void (*remove_from_lists) | 348 | const struct vmw_res_func *func) |
190 | (struct vmw_resource *res)) | ||
191 | { | 349 | { |
192 | kref_init(&res->kref); | 350 | kref_init(&res->kref); |
193 | res->hw_destroy = NULL; | 351 | res->hw_destroy = NULL; |
194 | res->res_free = res_free; | 352 | res->res_free = res_free; |
195 | res->remove_from_lists = remove_from_lists; | ||
196 | res->res_type = obj_type; | ||
197 | res->idr = idr; | ||
198 | res->avail = false; | 353 | res->avail = false; |
199 | res->dev_priv = dev_priv; | 354 | res->dev_priv = dev_priv; |
200 | INIT_LIST_HEAD(&res->query_head); | 355 | res->func = func; |
201 | INIT_LIST_HEAD(&res->validate_head); | 356 | INIT_LIST_HEAD(&res->lru_head); |
357 | INIT_LIST_HEAD(&res->mob_head); | ||
202 | res->id = -1; | 358 | res->id = -1; |
359 | res->backup = NULL; | ||
360 | res->backup_offset = 0; | ||
361 | res->backup_dirty = false; | ||
362 | res->res_dirty = false; | ||
203 | if (delay_id) | 363 | if (delay_id) |
204 | return 0; | 364 | return 0; |
205 | else | 365 | else |
206 | return vmw_resource_alloc_id(dev_priv, res); | 366 | return vmw_resource_alloc_id(res); |
207 | } | 367 | } |
208 | 368 | ||
209 | /** | 369 | /** |
@@ -218,7 +378,6 @@ static int vmw_resource_init(struct vmw_private *dev_priv, | |||
218 | * Activate basically means that the function vmw_resource_lookup will | 378 | * Activate basically means that the function vmw_resource_lookup will |
219 | * find it. | 379 | * find it. |
220 | */ | 380 | */ |
221 | |||
222 | static void vmw_resource_activate(struct vmw_resource *res, | 381 | static void vmw_resource_activate(struct vmw_resource *res, |
223 | void (*hw_destroy) (struct vmw_resource *)) | 382 | void (*hw_destroy) (struct vmw_resource *)) |
224 | { | 383 | { |
@@ -263,8 +422,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
263 | } *cmd; | 422 | } *cmd; |
264 | 423 | ||
265 | 424 | ||
266 | vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); | 425 | vmw_execbuf_release_pinned_bo(dev_priv); |
267 | |||
268 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 426 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
269 | if (unlikely(cmd == NULL)) { | 427 | if (unlikely(cmd == NULL)) { |
270 | DRM_ERROR("Failed reserving FIFO space for surface " | 428 | DRM_ERROR("Failed reserving FIFO space for surface " |
@@ -291,8 +449,8 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
291 | SVGA3dCmdDefineContext body; | 449 | SVGA3dCmdDefineContext body; |
292 | } *cmd; | 450 | } *cmd; |
293 | 451 | ||
294 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | 452 | ret = vmw_resource_init(dev_priv, res, false, |
295 | VMW_RES_CONTEXT, false, res_free, NULL); | 453 | res_free, &vmw_legacy_context_func); |
296 | 454 | ||
297 | if (unlikely(ret != 0)) { | 455 | if (unlikely(ret != 0)) { |
298 | DRM_ERROR("Failed to allocate a resource id.\n"); | 456 | DRM_ERROR("Failed to allocate a resource id.\n"); |
@@ -338,6 +496,7 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | |||
338 | return NULL; | 496 | return NULL; |
339 | 497 | ||
340 | ret = vmw_context_init(dev_priv, res, NULL); | 498 | ret = vmw_context_init(dev_priv, res, NULL); |
499 | |||
341 | return (ret == 0) ? res : NULL; | 500 | return (ret == 0) ? res : NULL; |
342 | } | 501 | } |
343 | 502 | ||
@@ -345,6 +504,12 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | |||
345 | * User-space context management: | 504 | * User-space context management: |
346 | */ | 505 | */ |
347 | 506 | ||
507 | static struct vmw_resource * | ||
508 | vmw_user_context_base_to_res(struct ttm_base_object *base) | ||
509 | { | ||
510 | return &(container_of(base, struct vmw_user_context, base)->res); | ||
511 | } | ||
512 | |||
348 | static void vmw_user_context_free(struct vmw_resource *res) | 513 | static void vmw_user_context_free(struct vmw_resource *res) |
349 | { | 514 | { |
350 | struct vmw_user_context *ctx = | 515 | struct vmw_user_context *ctx = |
@@ -375,32 +540,10 @@ static void vmw_user_context_base_release(struct ttm_base_object **p_base) | |||
375 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | 540 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
376 | struct drm_file *file_priv) | 541 | struct drm_file *file_priv) |
377 | { | 542 | { |
378 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
379 | struct vmw_resource *res; | ||
380 | struct vmw_user_context *ctx; | ||
381 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | 543 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
382 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 544 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
383 | int ret = 0; | ||
384 | |||
385 | res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); | ||
386 | if (unlikely(res == NULL)) | ||
387 | return -EINVAL; | ||
388 | |||
389 | if (res->res_free != &vmw_user_context_free) { | ||
390 | ret = -EINVAL; | ||
391 | goto out; | ||
392 | } | ||
393 | |||
394 | ctx = container_of(res, struct vmw_user_context, res); | ||
395 | if (ctx->base.tfile != tfile && !ctx->base.shareable) { | ||
396 | ret = -EPERM; | ||
397 | goto out; | ||
398 | } | ||
399 | 545 | ||
400 | ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); | 546 | return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); |
401 | out: | ||
402 | vmw_resource_unreference(&res); | ||
403 | return ret; | ||
404 | } | 547 | } |
405 | 548 | ||
406 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, | 549 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
@@ -438,7 +581,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
438 | goto out_unlock; | 581 | goto out_unlock; |
439 | } | 582 | } |
440 | 583 | ||
441 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 584 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
442 | if (unlikely(ctx == NULL)) { | 585 | if (unlikely(ctx == NULL)) { |
443 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 586 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
444 | vmw_user_context_size); | 587 | vmw_user_context_size); |
@@ -467,7 +610,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
467 | goto out_err; | 610 | goto out_err; |
468 | } | 611 | } |
469 | 612 | ||
470 | arg->cid = res->id; | 613 | arg->cid = ctx->base.hash.key; |
471 | out_err: | 614 | out_err: |
472 | vmw_resource_unreference(&res); | 615 | vmw_resource_unreference(&res); |
473 | out_unlock: | 616 | out_unlock: |
@@ -476,30 +619,13 @@ out_unlock: | |||
476 | 619 | ||
477 | } | 620 | } |
478 | 621 | ||
479 | int vmw_context_check(struct vmw_private *dev_priv, | 622 | /** |
480 | struct ttm_object_file *tfile, | 623 | * struct vmw_bpp - Bits per pixel info for surface storage size computation. |
481 | int id, | 624 | * |
482 | struct vmw_resource **p_res) | 625 | * @bpp: Bits per pixel. |
483 | { | 626 | * @s_bpp: Stride bits per pixel. See definition below. |
484 | struct vmw_resource *res; | 627 | * |
485 | int ret = 0; | 628 | */ |
486 | |||
487 | read_lock(&dev_priv->resource_lock); | ||
488 | res = idr_find(&dev_priv->context_idr, id); | ||
489 | if (res && res->avail) { | ||
490 | struct vmw_user_context *ctx = | ||
491 | container_of(res, struct vmw_user_context, res); | ||
492 | if (ctx->base.tfile != tfile && !ctx->base.shareable) | ||
493 | ret = -EPERM; | ||
494 | if (p_res) | ||
495 | *p_res = vmw_resource_reference(res); | ||
496 | } else | ||
497 | ret = -EINVAL; | ||
498 | read_unlock(&dev_priv->resource_lock); | ||
499 | |||
500 | return ret; | ||
501 | } | ||
502 | |||
503 | struct vmw_bpp { | 629 | struct vmw_bpp { |
504 | uint8_t bpp; | 630 | uint8_t bpp; |
505 | uint8_t s_bpp; | 631 | uint8_t s_bpp; |
@@ -573,9 +699,8 @@ static const struct vmw_bpp vmw_sf_bpp[] = { | |||
573 | 699 | ||
574 | 700 | ||
575 | /** | 701 | /** |
576 | * Surface management. | 702 | * struct vmw_surface_dma - SVGA3D DMA command |
577 | */ | 703 | */ |
578 | |||
579 | struct vmw_surface_dma { | 704 | struct vmw_surface_dma { |
580 | SVGA3dCmdHeader header; | 705 | SVGA3dCmdHeader header; |
581 | SVGA3dCmdSurfaceDMA body; | 706 | SVGA3dCmdSurfaceDMA body; |
@@ -583,11 +708,17 @@ struct vmw_surface_dma { | |||
583 | SVGA3dCmdSurfaceDMASuffix suffix; | 708 | SVGA3dCmdSurfaceDMASuffix suffix; |
584 | }; | 709 | }; |
585 | 710 | ||
711 | /** | ||
712 | * struct vmw_surface_define - SVGA3D Surface Define command | ||
713 | */ | ||
586 | struct vmw_surface_define { | 714 | struct vmw_surface_define { |
587 | SVGA3dCmdHeader header; | 715 | SVGA3dCmdHeader header; |
588 | SVGA3dCmdDefineSurface body; | 716 | SVGA3dCmdDefineSurface body; |
589 | }; | 717 | }; |
590 | 718 | ||
719 | /** | ||
720 | * struct vmw_surface_destroy - SVGA3D Surface Destroy command | ||
721 | */ | ||
591 | struct vmw_surface_destroy { | 722 | struct vmw_surface_destroy { |
592 | SVGA3dCmdHeader header; | 723 | SVGA3dCmdHeader header; |
593 | SVGA3dCmdDestroySurface body; | 724 | SVGA3dCmdDestroySurface body; |
@@ -688,7 +819,6 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf, | |||
688 | } | 819 | } |
689 | } | 820 | } |
690 | 821 | ||
691 | |||
692 | /** | 822 | /** |
693 | * vmw_surface_dma_encode - Encode a surface_dma command. | 823 | * vmw_surface_dma_encode - Encode a surface_dma command. |
694 | * | 824 | * |
@@ -748,6 +878,15 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, | |||
748 | }; | 878 | }; |
749 | 879 | ||
750 | 880 | ||
881 | /** | ||
882 | * vmw_hw_surface_destroy - destroy a Device surface | ||
883 | * | ||
884 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
885 | * vmw_surface. | ||
886 | * | ||
887 | * Destroys the device surface associated with a struct vmw_surface if | ||
888 | * any, and adjusts accounting and resource count accordingly. | ||
889 | */ | ||
751 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | 890 | static void vmw_hw_surface_destroy(struct vmw_resource *res) |
752 | { | 891 | { |
753 | 892 | ||
@@ -774,47 +913,30 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
774 | */ | 913 | */ |
775 | 914 | ||
776 | mutex_lock(&dev_priv->cmdbuf_mutex); | 915 | mutex_lock(&dev_priv->cmdbuf_mutex); |
777 | srf = container_of(res, struct vmw_surface, res); | 916 | srf = vmw_res_to_srf(res); |
778 | dev_priv->used_memory_size -= srf->backup_size; | 917 | dev_priv->used_memory_size -= res->backup_size; |
779 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 918 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
780 | |||
781 | } | 919 | } |
782 | vmw_3d_resource_dec(dev_priv, false); | 920 | vmw_3d_resource_dec(dev_priv, false); |
783 | } | 921 | } |
784 | 922 | ||
785 | void vmw_surface_res_free(struct vmw_resource *res) | ||
786 | { | ||
787 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
788 | |||
789 | if (srf->backup) | ||
790 | ttm_bo_unref(&srf->backup); | ||
791 | kfree(srf->offsets); | ||
792 | kfree(srf->sizes); | ||
793 | kfree(srf->snooper.image); | ||
794 | kfree(srf); | ||
795 | } | ||
796 | |||
797 | |||
798 | /** | 923 | /** |
799 | * vmw_surface_do_validate - make a surface available to the device. | 924 | * vmw_legacy_srf_create - Create a device surface as part of the |
925 | * resource validation process. | ||
800 | * | 926 | * |
801 | * @dev_priv: Pointer to a device private struct. | 927 | * @res: Pointer to a struct vmw_surface. |
802 | * @srf: Pointer to a struct vmw_surface. | ||
803 | * | 928 | * |
804 | * If the surface doesn't have a hw id, allocate one, and optionally | 929 | * If the surface doesn't have a hw id, allocate one. |
805 | * DMA the backed up surface contents to the device. | ||
806 | * | 930 | * |
807 | * Returns -EBUSY if there wasn't sufficient device resources to | 931 | * Returns -EBUSY if there wasn't sufficient device resources to |
808 | * complete the validation. Retry after freeing up resources. | 932 | * complete the validation. Retry after freeing up resources. |
809 | * | 933 | * |
810 | * May return other errors if the kernel is out of guest resources. | 934 | * May return other errors if the kernel is out of guest resources. |
811 | */ | 935 | */ |
812 | int vmw_surface_do_validate(struct vmw_private *dev_priv, | 936 | static int vmw_legacy_srf_create(struct vmw_resource *res) |
813 | struct vmw_surface *srf) | ||
814 | { | 937 | { |
815 | struct vmw_resource *res = &srf->res; | 938 | struct vmw_private *dev_priv = res->dev_priv; |
816 | struct list_head val_list; | 939 | struct vmw_surface *srf; |
817 | struct ttm_validate_buffer val_buf; | ||
818 | uint32_t submit_size; | 940 | uint32_t submit_size; |
819 | uint8_t *cmd; | 941 | uint8_t *cmd; |
820 | int ret; | 942 | int ret; |
@@ -822,324 +944,261 @@ int vmw_surface_do_validate(struct vmw_private *dev_priv, | |||
822 | if (likely(res->id != -1)) | 944 | if (likely(res->id != -1)) |
823 | return 0; | 945 | return 0; |
824 | 946 | ||
825 | if (unlikely(dev_priv->used_memory_size + srf->backup_size >= | 947 | srf = vmw_res_to_srf(res); |
948 | if (unlikely(dev_priv->used_memory_size + res->backup_size >= | ||
826 | dev_priv->memory_size)) | 949 | dev_priv->memory_size)) |
827 | return -EBUSY; | 950 | return -EBUSY; |
828 | 951 | ||
829 | /* | 952 | /* |
830 | * Reserve- and validate the backup DMA bo. | ||
831 | */ | ||
832 | |||
833 | if (srf->backup) { | ||
834 | INIT_LIST_HEAD(&val_list); | ||
835 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
836 | list_add_tail(&val_buf.head, &val_list); | ||
837 | ret = ttm_eu_reserve_buffers(&val_list); | ||
838 | if (unlikely(ret != 0)) | ||
839 | goto out_no_reserve; | ||
840 | |||
841 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
842 | true, false, false); | ||
843 | if (unlikely(ret != 0)) | ||
844 | goto out_no_validate; | ||
845 | } | ||
846 | |||
847 | /* | ||
848 | * Alloc id for the resource. | 953 | * Alloc id for the resource. |
849 | */ | 954 | */ |
850 | 955 | ||
851 | ret = vmw_resource_alloc_id(dev_priv, res); | 956 | ret = vmw_resource_alloc_id(res); |
852 | if (unlikely(ret != 0)) { | 957 | if (unlikely(ret != 0)) { |
853 | DRM_ERROR("Failed to allocate a surface id.\n"); | 958 | DRM_ERROR("Failed to allocate a surface id.\n"); |
854 | goto out_no_id; | 959 | goto out_no_id; |
855 | } | 960 | } |
961 | |||
856 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { | 962 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { |
857 | ret = -EBUSY; | 963 | ret = -EBUSY; |
858 | goto out_no_fifo; | 964 | goto out_no_fifo; |
859 | } | 965 | } |
860 | 966 | ||
861 | |||
862 | /* | 967 | /* |
863 | * Encode surface define- and dma commands. | 968 | * Encode the surface define command. |
864 | */ | 969 | */ |
865 | 970 | ||
866 | submit_size = vmw_surface_define_size(srf); | 971 | submit_size = vmw_surface_define_size(srf); |
867 | if (srf->backup) | ||
868 | submit_size += vmw_surface_dma_size(srf); | ||
869 | |||
870 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 972 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
871 | if (unlikely(cmd == NULL)) { | 973 | if (unlikely(cmd == NULL)) { |
872 | DRM_ERROR("Failed reserving FIFO space for surface " | 974 | DRM_ERROR("Failed reserving FIFO space for surface " |
873 | "validation.\n"); | 975 | "creation.\n"); |
874 | ret = -ENOMEM; | 976 | ret = -ENOMEM; |
875 | goto out_no_fifo; | 977 | goto out_no_fifo; |
876 | } | 978 | } |
877 | 979 | ||
878 | vmw_surface_define_encode(srf, cmd); | 980 | vmw_surface_define_encode(srf, cmd); |
879 | if (srf->backup) { | ||
880 | SVGAGuestPtr ptr; | ||
881 | |||
882 | cmd += vmw_surface_define_size(srf); | ||
883 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
884 | vmw_surface_dma_encode(srf, cmd, &ptr, true); | ||
885 | } | ||
886 | |||
887 | vmw_fifo_commit(dev_priv, submit_size); | 981 | vmw_fifo_commit(dev_priv, submit_size); |
888 | |||
889 | /* | ||
890 | * Create a fence object and fence the backup buffer. | ||
891 | */ | ||
892 | |||
893 | if (srf->backup) { | ||
894 | struct vmw_fence_obj *fence; | ||
895 | |||
896 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
897 | &fence, NULL); | ||
898 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
899 | if (likely(fence != NULL)) | ||
900 | vmw_fence_obj_unreference(&fence); | ||
901 | ttm_bo_unref(&val_buf.bo); | ||
902 | ttm_bo_unref(&srf->backup); | ||
903 | } | ||
904 | |||
905 | /* | 982 | /* |
906 | * Surface memory usage accounting. | 983 | * Surface memory usage accounting. |
907 | */ | 984 | */ |
908 | 985 | ||
909 | dev_priv->used_memory_size += srf->backup_size; | 986 | dev_priv->used_memory_size += res->backup_size; |
910 | |||
911 | return 0; | 987 | return 0; |
912 | 988 | ||
913 | out_no_fifo: | 989 | out_no_fifo: |
914 | vmw_resource_release_id(res); | 990 | vmw_resource_release_id(res); |
915 | out_no_id: | 991 | out_no_id: |
916 | out_no_validate: | ||
917 | if (srf->backup) | ||
918 | ttm_eu_backoff_reservation(&val_list); | ||
919 | out_no_reserve: | ||
920 | if (srf->backup) | ||
921 | ttm_bo_unref(&val_buf.bo); | ||
922 | return ret; | 992 | return ret; |
923 | } | 993 | } |
924 | 994 | ||
925 | /** | 995 | /** |
926 | * vmw_surface_evict - Evict a hw surface. | 996 | * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. |
927 | * | 997 | * |
928 | * @dev_priv: Pointer to a device private struct. | 998 | * @res: Pointer to a struct vmw_resource embedded in a struct |
929 | * @srf: Pointer to a struct vmw_surface | 999 | * vmw_surface. |
1000 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | ||
1001 | * information about the backup buffer. | ||
1002 | * @bind: Boolean indicating whether to DMA to the surface. | ||
930 | * | 1003 | * |
931 | * DMA the contents of a hw surface to a backup guest buffer object, | 1004 | * Transfer backup data to or from a legacy surface as part of the |
932 | * and destroy the hw surface, releasing its id. | 1005 | * validation process. |
1006 | * May return other errors if the kernel is out of guest resources. | ||
1007 | * The backup buffer will be fenced or idle upon successful completion, | ||
1008 | * and if the surface needs persistent backup storage, the backup buffer | ||
1009 | * will also be returned reserved iff @bind is true. | ||
933 | */ | 1010 | */ |
934 | int vmw_surface_evict(struct vmw_private *dev_priv, | 1011 | static int vmw_legacy_srf_dma(struct vmw_resource *res, |
935 | struct vmw_surface *srf) | 1012 | struct ttm_validate_buffer *val_buf, |
1013 | bool bind) | ||
936 | { | 1014 | { |
937 | struct vmw_resource *res = &srf->res; | 1015 | SVGAGuestPtr ptr; |
938 | struct list_head val_list; | 1016 | struct vmw_fence_obj *fence; |
939 | struct ttm_validate_buffer val_buf; | ||
940 | uint32_t submit_size; | 1017 | uint32_t submit_size; |
1018 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
941 | uint8_t *cmd; | 1019 | uint8_t *cmd; |
942 | int ret; | 1020 | struct vmw_private *dev_priv = res->dev_priv; |
943 | struct vmw_fence_obj *fence; | ||
944 | SVGAGuestPtr ptr; | ||
945 | |||
946 | BUG_ON(res->id == -1); | ||
947 | |||
948 | /* | ||
949 | * Create a surface backup buffer object. | ||
950 | */ | ||
951 | |||
952 | if (!srf->backup) { | ||
953 | ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, | ||
954 | ttm_bo_type_device, | ||
955 | &vmw_srf_placement, 0, true, | ||
956 | NULL, &srf->backup); | ||
957 | if (unlikely(ret != 0)) | ||
958 | return ret; | ||
959 | } | ||
960 | |||
961 | /* | ||
962 | * Reserve- and validate the backup DMA bo. | ||
963 | */ | ||
964 | |||
965 | INIT_LIST_HEAD(&val_list); | ||
966 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
967 | list_add_tail(&val_buf.head, &val_list); | ||
968 | ret = ttm_eu_reserve_buffers(&val_list); | ||
969 | if (unlikely(ret != 0)) | ||
970 | goto out_no_reserve; | ||
971 | |||
972 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
973 | true, false, false); | ||
974 | if (unlikely(ret != 0)) | ||
975 | goto out_no_validate; | ||
976 | |||
977 | 1021 | ||
978 | /* | 1022 | BUG_ON(val_buf->bo == NULL); |
979 | * Encode the dma- and surface destroy commands. | ||
980 | */ | ||
981 | 1023 | ||
982 | submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); | 1024 | submit_size = vmw_surface_dma_size(srf); |
983 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 1025 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
984 | if (unlikely(cmd == NULL)) { | 1026 | if (unlikely(cmd == NULL)) { |
985 | DRM_ERROR("Failed reserving FIFO space for surface " | 1027 | DRM_ERROR("Failed reserving FIFO space for surface " |
986 | "eviction.\n"); | 1028 | "DMA.\n"); |
987 | ret = -ENOMEM; | 1029 | return -ENOMEM; |
988 | goto out_no_fifo; | ||
989 | } | 1030 | } |
1031 | vmw_bo_get_guest_ptr(val_buf->bo, &ptr); | ||
1032 | vmw_surface_dma_encode(srf, cmd, &ptr, bind); | ||
990 | 1033 | ||
991 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
992 | vmw_surface_dma_encode(srf, cmd, &ptr, false); | ||
993 | cmd += vmw_surface_dma_size(srf); | ||
994 | vmw_surface_destroy_encode(res->id, cmd); | ||
995 | vmw_fifo_commit(dev_priv, submit_size); | 1034 | vmw_fifo_commit(dev_priv, submit_size); |
996 | 1035 | ||
997 | /* | 1036 | /* |
998 | * Surface memory usage accounting. | 1037 | * Create a fence object and fence the backup buffer. |
999 | */ | ||
1000 | |||
1001 | dev_priv->used_memory_size -= srf->backup_size; | ||
1002 | |||
1003 | /* | ||
1004 | * Create a fence object and fence the DMA buffer. | ||
1005 | */ | 1038 | */ |
1006 | 1039 | ||
1007 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | 1040 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, |
1008 | &fence, NULL); | 1041 | &fence, NULL); |
1009 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
1010 | if (likely(fence != NULL)) | ||
1011 | vmw_fence_obj_unreference(&fence); | ||
1012 | ttm_bo_unref(&val_buf.bo); | ||
1013 | 1042 | ||
1014 | /* | 1043 | vmw_fence_single_bo(val_buf->bo, fence); |
1015 | * Release the surface ID. | ||
1016 | */ | ||
1017 | 1044 | ||
1018 | vmw_resource_release_id(res); | 1045 | if (likely(fence != NULL)) |
1046 | vmw_fence_obj_unreference(&fence); | ||
1019 | 1047 | ||
1020 | return 0; | 1048 | return 0; |
1049 | } | ||
1021 | 1050 | ||
1022 | out_no_fifo: | 1051 | /** |
1023 | out_no_validate: | 1052 | * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the |
1024 | if (srf->backup) | 1053 | * surface validation process. |
1025 | ttm_eu_backoff_reservation(&val_list); | 1054 | * |
1026 | out_no_reserve: | 1055 | * @res: Pointer to a struct vmw_resource embedded in a struct |
1027 | ttm_bo_unref(&val_buf.bo); | 1056 | * vmw_surface. |
1028 | ttm_bo_unref(&srf->backup); | 1057 | * @val_buf: Pointer to a struct ttm_validate_buffer containing |
1029 | return ret; | 1058 | * information about the backup buffer. |
1059 | * | ||
1060 | * This function will copy backup data to the surface if the | ||
1061 | * backup buffer is dirty. | ||
1062 | */ | ||
1063 | static int vmw_legacy_srf_bind(struct vmw_resource *res, | ||
1064 | struct ttm_validate_buffer *val_buf) | ||
1065 | { | ||
1066 | if (!res->backup_dirty) | ||
1067 | return 0; | ||
1068 | |||
1069 | return vmw_legacy_srf_dma(res, val_buf, true); | ||
1030 | } | 1070 | } |
1031 | 1071 | ||
1032 | 1072 | ||
1033 | /** | 1073 | /** |
1034 | * vmw_surface_validate - make a surface available to the device, evicting | 1074 | * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the |
1035 | * other surfaces if needed. | 1075 | * surface eviction process. |
1036 | * | 1076 | * |
1037 | * @dev_priv: Pointer to a device private struct. | 1077 | * @res: Pointer to a struct vmw_resource embedded in a struct |
1038 | * @srf: Pointer to a struct vmw_surface. | 1078 | * vmw_surface. |
1079 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | ||
1080 | * information about the backup buffer. | ||
1039 | * | 1081 | * |
1040 | * Try to validate a surface and if it fails due to limited device resources, | 1082 | * This function will copy backup data from the surface. |
1041 | * repeatedly try to evict other surfaces until the request can be | 1083 | */ |
1042 | * acommodated. | 1084 | static int vmw_legacy_srf_unbind(struct vmw_resource *res, |
1085 | bool readback, | ||
1086 | struct ttm_validate_buffer *val_buf) | ||
1087 | { | ||
1088 | if (unlikely(readback)) | ||
1089 | return vmw_legacy_srf_dma(res, val_buf, false); | ||
1090 | return 0; | ||
1091 | } | ||
1092 | |||
1093 | /** | ||
1094 | * vmw_legacy_srf_destroy - Destroy a device surface as part of a | ||
1095 | * resource eviction process. | ||
1043 | * | 1096 | * |
1044 | * May return errors if out of resources. | 1097 | * @res: Pointer to a struct vmw_resource embedded in a struct |
1098 | * vmw_surface. | ||
1045 | */ | 1099 | */ |
1046 | int vmw_surface_validate(struct vmw_private *dev_priv, | 1100 | static int vmw_legacy_srf_destroy(struct vmw_resource *res) |
1047 | struct vmw_surface *srf) | ||
1048 | { | 1101 | { |
1049 | int ret; | 1102 | struct vmw_private *dev_priv = res->dev_priv; |
1050 | struct vmw_surface *evict_srf; | 1103 | uint32_t submit_size; |
1104 | uint8_t *cmd; | ||
1051 | 1105 | ||
1052 | do { | 1106 | BUG_ON(res->id == -1); |
1053 | write_lock(&dev_priv->resource_lock); | ||
1054 | list_del_init(&srf->lru_head); | ||
1055 | write_unlock(&dev_priv->resource_lock); | ||
1056 | 1107 | ||
1057 | ret = vmw_surface_do_validate(dev_priv, srf); | 1108 | /* |
1058 | if (likely(ret != -EBUSY)) | 1109 | * Encode the dma- and surface destroy commands. |
1059 | break; | 1110 | */ |
1060 | 1111 | ||
1061 | write_lock(&dev_priv->resource_lock); | 1112 | submit_size = vmw_surface_destroy_size(); |
1062 | if (list_empty(&dev_priv->surface_lru)) { | 1113 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
1063 | DRM_ERROR("Out of device memory for surfaces.\n"); | 1114 | if (unlikely(cmd == NULL)) { |
1064 | ret = -EBUSY; | 1115 | DRM_ERROR("Failed reserving FIFO space for surface " |
1065 | write_unlock(&dev_priv->resource_lock); | 1116 | "eviction.\n"); |
1066 | break; | 1117 | return -ENOMEM; |
1067 | } | 1118 | } |
1068 | 1119 | ||
1069 | evict_srf = vmw_surface_reference | 1120 | vmw_surface_destroy_encode(res->id, cmd); |
1070 | (list_first_entry(&dev_priv->surface_lru, | 1121 | vmw_fifo_commit(dev_priv, submit_size); |
1071 | struct vmw_surface, | ||
1072 | lru_head)); | ||
1073 | list_del_init(&evict_srf->lru_head); | ||
1074 | 1122 | ||
1075 | write_unlock(&dev_priv->resource_lock); | 1123 | /* |
1076 | (void) vmw_surface_evict(dev_priv, evict_srf); | 1124 | * Surface memory usage accounting. |
1125 | */ | ||
1077 | 1126 | ||
1078 | vmw_surface_unreference(&evict_srf); | 1127 | dev_priv->used_memory_size -= res->backup_size; |
1079 | 1128 | ||
1080 | } while (1); | 1129 | /* |
1130 | * Release the surface ID. | ||
1131 | */ | ||
1081 | 1132 | ||
1082 | if (unlikely(ret != 0 && srf->res.id != -1)) { | 1133 | vmw_resource_release_id(res); |
1083 | write_lock(&dev_priv->resource_lock); | ||
1084 | list_add_tail(&srf->lru_head, &dev_priv->surface_lru); | ||
1085 | write_unlock(&dev_priv->resource_lock); | ||
1086 | } | ||
1087 | 1134 | ||
1088 | return ret; | 1135 | return 0; |
1089 | } | 1136 | } |
1090 | 1137 | ||
1091 | 1138 | ||
1092 | /** | 1139 | /** |
1093 | * vmw_surface_remove_from_lists - Remove surface resources from lookup lists | 1140 | * vmw_surface_init - initialize a struct vmw_surface |
1094 | * | 1141 | * |
1095 | * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface | 1142 | * @dev_priv: Pointer to a device private struct. |
1096 | * | 1143 | * @srf: Pointer to the struct vmw_surface to initialize. |
1097 | * As part of the resource destruction, remove the surface from any | 1144 | * @res_free: Pointer to a resource destructor used to free |
1098 | * lookup lists. | 1145 | * the object. |
1099 | */ | 1146 | */ |
1100 | static void vmw_surface_remove_from_lists(struct vmw_resource *res) | 1147 | static int vmw_surface_init(struct vmw_private *dev_priv, |
1101 | { | 1148 | struct vmw_surface *srf, |
1102 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | 1149 | void (*res_free) (struct vmw_resource *res)) |
1103 | |||
1104 | list_del_init(&srf->lru_head); | ||
1105 | } | ||
1106 | |||
1107 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
1108 | struct vmw_surface *srf, | ||
1109 | void (*res_free) (struct vmw_resource *res)) | ||
1110 | { | 1150 | { |
1111 | int ret; | 1151 | int ret; |
1112 | struct vmw_resource *res = &srf->res; | 1152 | struct vmw_resource *res = &srf->res; |
1113 | 1153 | ||
1114 | BUG_ON(res_free == NULL); | 1154 | BUG_ON(res_free == NULL); |
1115 | INIT_LIST_HEAD(&srf->lru_head); | 1155 | (void) vmw_3d_resource_inc(dev_priv, false); |
1116 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | 1156 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
1117 | VMW_RES_SURFACE, true, res_free, | 1157 | &vmw_legacy_surface_func); |
1118 | vmw_surface_remove_from_lists); | ||
1119 | 1158 | ||
1120 | if (unlikely(ret != 0)) | 1159 | if (unlikely(ret != 0)) { |
1160 | vmw_3d_resource_dec(dev_priv, false); | ||
1121 | res_free(res); | 1161 | res_free(res); |
1162 | return ret; | ||
1163 | } | ||
1122 | 1164 | ||
1123 | /* | 1165 | /* |
1124 | * The surface won't be visible to hardware until a | 1166 | * The surface won't be visible to hardware until a |
1125 | * surface validate. | 1167 | * surface validate. |
1126 | */ | 1168 | */ |
1127 | 1169 | ||
1128 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
1129 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 1170 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
1130 | return ret; | 1171 | return ret; |
1131 | } | 1172 | } |
1132 | 1173 | ||
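Note: the surface-specific validate/evict code above is now driven through a per-type ops table (struct vmw_res_func) handed to vmw_resource_init(). The table itself is defined elsewhere and only referenced here as &vmw_legacy_surface_func, so the sketch below is an illustration assembled from the res->func dereferences later in this file; the field names and values are inferred, not quoted from the patch.

	/* Illustrative sketch only -- inferred, not part of this hunk. */
	static const struct vmw_res_func vmw_legacy_surface_func = {
		.res_type = vmw_res_surface,          /* assumed enum vmw_res_type value */
		.needs_backup = false,                /* legacy surfaces live in device memory */
		.may_evict = true,                    /* eligible for the per-type LRU list */
		.type_name = "legacy surfaces",       /* used by the eviction error message */
		.backup_placement = &vmw_srf_placement,
		.create = vmw_legacy_srf_create,      /* SVGA3D surface define */
		.destroy = vmw_legacy_srf_destroy,    /* SVGA3D surface destroy */
		.bind = vmw_legacy_srf_bind,          /* DMA backup buffer -> surface */
		.unbind = vmw_legacy_srf_unbind,      /* DMA surface -> backup buffer on eviction */
	};

The generic code in vmw_resource_do_validate() and vmw_resource_do_evict() further down dispatches through these pointers instead of special-casing surfaces.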
1174 | /** | ||
1175 | * vmw_user_surface_base_to_res - TTM base object to resource converter for | ||
1176 | * user visible surfaces | ||
1177 | * | ||
1178 | * @base: Pointer to a TTM base object | ||
1179 | * | ||
1180 | * Returns the struct vmw_resource embedded in a struct vmw_surface | ||
1181 | * for the user-visible object identified by the TTM base object @base. | ||
1182 | */ | ||
1183 | static struct vmw_resource * | ||
1184 | vmw_user_surface_base_to_res(struct ttm_base_object *base) | ||
1185 | { | ||
1186 | return &(container_of(base, struct vmw_user_surface, base)->srf.res); | ||
1187 | } | ||
1188 | |||
1189 | /** | ||
1190 | * vmw_user_surface_free - User visible surface resource destructor | ||
1191 | * | ||
1192 | * @res: A struct vmw_resource embedded in a struct vmw_surface. | ||
1193 | */ | ||
1133 | static void vmw_user_surface_free(struct vmw_resource *res) | 1194 | static void vmw_user_surface_free(struct vmw_resource *res) |
1134 | { | 1195 | { |
1135 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | 1196 | struct vmw_surface *srf = vmw_res_to_srf(res); |
1136 | struct vmw_user_surface *user_srf = | 1197 | struct vmw_user_surface *user_srf = |
1137 | container_of(srf, struct vmw_user_surface, srf); | 1198 | container_of(srf, struct vmw_user_surface, srf); |
1138 | struct vmw_private *dev_priv = srf->res.dev_priv; | 1199 | struct vmw_private *dev_priv = srf->res.dev_priv; |
1139 | uint32_t size = user_srf->size; | 1200 | uint32_t size = user_srf->size; |
1140 | 1201 | ||
1141 | if (srf->backup) | ||
1142 | ttm_bo_unref(&srf->backup); | ||
1143 | kfree(srf->offsets); | 1202 | kfree(srf->offsets); |
1144 | kfree(srf->sizes); | 1203 | kfree(srf->sizes); |
1145 | kfree(srf->snooper.image); | 1204 | kfree(srf->snooper.image); |
@@ -1148,108 +1207,14 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
1148 | } | 1207 | } |
1149 | 1208 | ||
1150 | /** | 1209 | /** |
1151 | * vmw_resource_unreserve - unreserve resources previously reserved for | 1210 | * vmw_user_surface_base_release - User visible surface TTM base object destructor |
1152 | * command submission. | ||
1153 | * | 1211 | * |
1154 | * @list_head: list of resources to unreserve. | 1212 | * @p_base: Pointer to a pointer to a TTM base object |
1213 | * embedded in a struct vmw_user_surface. | ||
1155 | * | 1214 | * |
1156 | * Currently only surfaces are considered, and unreserving a surface | 1215 | * Drops the base object's reference on its resource, and the |
1157 | * means putting it back on the device's surface lru list, | 1216 | * pointer pointed to by *p_base is set to NULL. |
1158 | * so that it can be evicted if necessary. | ||
1159 | * This function traverses the resource list and | ||
1160 | * checks whether resources are surfaces, and in that case puts them back | ||
1161 | * on the device's surface LRU list. | ||
1162 | */ | 1217 | */ |
1163 | void vmw_resource_unreserve(struct list_head *list) | ||
1164 | { | ||
1165 | struct vmw_resource *res; | ||
1166 | struct vmw_surface *srf; | ||
1167 | rwlock_t *lock = NULL; | ||
1168 | |||
1169 | list_for_each_entry(res, list, validate_head) { | ||
1170 | |||
1171 | if (res->res_free != &vmw_surface_res_free && | ||
1172 | res->res_free != &vmw_user_surface_free) | ||
1173 | continue; | ||
1174 | |||
1175 | if (unlikely(lock == NULL)) { | ||
1176 | lock = &res->dev_priv->resource_lock; | ||
1177 | write_lock(lock); | ||
1178 | } | ||
1179 | |||
1180 | srf = container_of(res, struct vmw_surface, res); | ||
1181 | list_del_init(&srf->lru_head); | ||
1182 | list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); | ||
1183 | } | ||
1184 | |||
1185 | if (lock != NULL) | ||
1186 | write_unlock(lock); | ||
1187 | } | ||
1188 | |||
1189 | /** | ||
1190 | * Helper function that looks either a surface or dmabuf. | ||
1191 | * | ||
1192 | * The pointer this pointed at by out_surf and out_buf needs to be null. | ||
1193 | */ | ||
1194 | int vmw_user_lookup_handle(struct vmw_private *dev_priv, | ||
1195 | struct ttm_object_file *tfile, | ||
1196 | uint32_t handle, | ||
1197 | struct vmw_surface **out_surf, | ||
1198 | struct vmw_dma_buffer **out_buf) | ||
1199 | { | ||
1200 | int ret; | ||
1201 | |||
1202 | BUG_ON(*out_surf || *out_buf); | ||
1203 | |||
1204 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf); | ||
1205 | if (!ret) | ||
1206 | return 0; | ||
1207 | |||
1208 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); | ||
1209 | return ret; | ||
1210 | } | ||
1211 | |||
1212 | |||
1213 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | ||
1214 | struct ttm_object_file *tfile, | ||
1215 | uint32_t handle, struct vmw_surface **out) | ||
1216 | { | ||
1217 | struct vmw_resource *res; | ||
1218 | struct vmw_surface *srf; | ||
1219 | struct vmw_user_surface *user_srf; | ||
1220 | struct ttm_base_object *base; | ||
1221 | int ret = -EINVAL; | ||
1222 | |||
1223 | base = ttm_base_object_lookup(tfile, handle); | ||
1224 | if (unlikely(base == NULL)) | ||
1225 | return -EINVAL; | ||
1226 | |||
1227 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
1228 | goto out_bad_resource; | ||
1229 | |||
1230 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
1231 | srf = &user_srf->srf; | ||
1232 | res = &srf->res; | ||
1233 | |||
1234 | read_lock(&dev_priv->resource_lock); | ||
1235 | |||
1236 | if (!res->avail || res->res_free != &vmw_user_surface_free) { | ||
1237 | read_unlock(&dev_priv->resource_lock); | ||
1238 | goto out_bad_resource; | ||
1239 | } | ||
1240 | |||
1241 | kref_get(&res->kref); | ||
1242 | read_unlock(&dev_priv->resource_lock); | ||
1243 | |||
1244 | *out = srf; | ||
1245 | ret = 0; | ||
1246 | |||
1247 | out_bad_resource: | ||
1248 | ttm_base_object_unref(&base); | ||
1249 | |||
1250 | return ret; | ||
1251 | } | ||
1252 | |||
1253 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | 1218 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) |
1254 | { | 1219 | { |
1255 | struct ttm_base_object *base = *p_base; | 1220 | struct ttm_base_object *base = *p_base; |
@@ -1261,6 +1226,14 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | |||
1261 | vmw_resource_unreference(&res); | 1226 | vmw_resource_unreference(&res); |
1262 | } | 1227 | } |
1263 | 1228 | ||
1229 | /** | ||
1230 | * vmw_surface_destroy_ioctl - Ioctl function implementing | ||
1231 | * the user surface destroy functionality. | ||
1232 | * | ||
1233 | * @dev: Pointer to a struct drm_device. | ||
1234 | * @data: Pointer to data copied from / to user-space. | ||
1235 | * @file_priv: Pointer to a drm file private structure. | ||
1236 | */ | ||
1264 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | 1237 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
1265 | struct drm_file *file_priv) | 1238 | struct drm_file *file_priv) |
1266 | { | 1239 | { |
@@ -1270,6 +1243,14 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | |||
1270 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); | 1243 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); |
1271 | } | 1244 | } |
1272 | 1245 | ||
1246 | /** | ||
1247 | * vmw_surface_define_ioctl - Ioctl function implementing | ||
1248 | * the user surface define functionality. | ||
1249 | * | ||
1250 | * @dev: Pointer to a struct drm_device. | ||
1251 | * @data: Pointer to data copied from / to user-space. | ||
1252 | * @file_priv: Pointer to a drm file private structure. | ||
1253 | */ | ||
1273 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | 1254 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
1274 | struct drm_file *file_priv) | 1255 | struct drm_file *file_priv) |
1275 | { | 1256 | { |
@@ -1325,7 +1306,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1325 | goto out_unlock; | 1306 | goto out_unlock; |
1326 | } | 1307 | } |
1327 | 1308 | ||
1328 | user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL); | 1309 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); |
1329 | if (unlikely(user_srf == NULL)) { | 1310 | if (unlikely(user_srf == NULL)) { |
1330 | ret = -ENOMEM; | 1311 | ret = -ENOMEM; |
1331 | goto out_no_user_srf; | 1312 | goto out_no_user_srf; |
@@ -1337,7 +1318,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1337 | srf->flags = req->flags; | 1318 | srf->flags = req->flags; |
1338 | srf->format = req->format; | 1319 | srf->format = req->format; |
1339 | srf->scanout = req->scanout; | 1320 | srf->scanout = req->scanout; |
1340 | srf->backup = NULL; | ||
1341 | 1321 | ||
1342 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 1322 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
1343 | srf->num_sizes = num_sizes; | 1323 | srf->num_sizes = num_sizes; |
@@ -1365,6 +1345,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1365 | goto out_no_copy; | 1345 | goto out_no_copy; |
1366 | } | 1346 | } |
1367 | 1347 | ||
1348 | srf->base_size = *srf->sizes; | ||
1349 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
1350 | srf->multisample_count = 1; | ||
1351 | |||
1368 | cur_bo_offset = 0; | 1352 | cur_bo_offset = 0; |
1369 | cur_offset = srf->offsets; | 1353 | cur_offset = srf->offsets; |
1370 | cur_size = srf->sizes; | 1354 | cur_size = srf->sizes; |
@@ -1386,7 +1370,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1386 | ++cur_size; | 1370 | ++cur_size; |
1387 | } | 1371 | } |
1388 | } | 1372 | } |
1389 | srf->backup_size = cur_bo_offset; | 1373 | res->backup_size = cur_bo_offset; |
1390 | 1374 | ||
1391 | if (srf->scanout && | 1375 | if (srf->scanout && |
1392 | srf->num_sizes == 1 && | 1376 | srf->num_sizes == 1 && |
@@ -1430,9 +1414,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1430 | } | 1414 | } |
1431 | 1415 | ||
1432 | rep->sid = user_srf->base.hash.key; | 1416 | rep->sid = user_srf->base.hash.key; |
1433 | if (rep->sid == SVGA3D_INVALID_ID) | ||
1434 | DRM_ERROR("Created bad Surface ID.\n"); | ||
1435 | |||
1436 | vmw_resource_unreference(&res); | 1417 | vmw_resource_unreference(&res); |
1437 | 1418 | ||
1438 | ttm_read_unlock(&vmaster->lock); | 1419 | ttm_read_unlock(&vmaster->lock); |
@@ -1450,6 +1431,14 @@ out_unlock: | |||
1450 | return ret; | 1431 | return ret; |
1451 | } | 1432 | } |
1452 | 1433 | ||
1434 | /** | ||
1435 | * vmw_surface_reference_ioctl - Ioctl function implementing | ||
1436 | * the user surface reference functionality. | ||
1437 | * | ||
1438 | * @dev: Pointer to a struct drm_device. | ||
1439 | * @data: Pointer to data copied from / to user-space. | ||
1440 | * @file_priv: Pointer to a drm file private structure. | ||
1441 | */ | ||
1453 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | 1442 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
1454 | struct drm_file *file_priv) | 1443 | struct drm_file *file_priv) |
1455 | { | 1444 | { |
@@ -1503,33 +1492,84 @@ out_no_reference: | |||
1503 | return ret; | 1492 | return ret; |
1504 | } | 1493 | } |
1505 | 1494 | ||
1506 | int vmw_surface_check(struct vmw_private *dev_priv, | 1495 | /** |
1507 | struct ttm_object_file *tfile, | 1496 | * vmw_user_resource_lookup_handle - lookup a struct resource from a |
1508 | uint32_t handle, int *id) | 1497 | * TTM user-space handle and perform basic type checks |
1498 | * | ||
1499 | * @dev_priv: Pointer to a device private struct | ||
1500 | * @tfile: Pointer to a struct ttm_object_file identifying the caller | ||
1501 | * @handle: The TTM user-space handle | ||
1502 | * @converter: Pointer to an object describing the resource type | ||
1503 | * @p_res: On successful return the location pointed to will contain | ||
1504 | * a pointer to a refcounted struct vmw_resource. | ||
1505 | * | ||
1506 | * If the handle can't be found or is associated with an incorrect resource | ||
1507 | * type, -EINVAL will be returned. | ||
1508 | */ | ||
1509 | int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, | ||
1510 | struct ttm_object_file *tfile, | ||
1511 | uint32_t handle, | ||
1512 | const struct vmw_user_resource_conv | ||
1513 | *converter, | ||
1514 | struct vmw_resource **p_res) | ||
1509 | { | 1515 | { |
1510 | struct ttm_base_object *base; | 1516 | struct ttm_base_object *base; |
1511 | struct vmw_user_surface *user_srf; | 1517 | struct vmw_resource *res; |
1512 | 1518 | int ret = -EINVAL; | |
1513 | int ret = -EPERM; | ||
1514 | 1519 | ||
1515 | base = ttm_base_object_lookup(tfile, handle); | 1520 | base = ttm_base_object_lookup(tfile, handle); |
1516 | if (unlikely(base == NULL)) | 1521 | if (unlikely(base == NULL)) |
1517 | return -EINVAL; | 1522 | return -EINVAL; |
1518 | 1523 | ||
1519 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | 1524 | if (unlikely(base->object_type != converter->object_type)) |
1520 | goto out_bad_surface; | 1525 | goto out_bad_resource; |
1521 | 1526 | ||
1522 | user_srf = container_of(base, struct vmw_user_surface, base); | 1527 | res = converter->base_obj_to_res(base); |
1523 | *id = user_srf->srf.res.id; | ||
1524 | ret = 0; | ||
1525 | 1528 | ||
1526 | out_bad_surface: | 1529 | read_lock(&dev_priv->resource_lock); |
1527 | /** | 1530 | if (!res->avail || res->res_free != converter->res_free) { |
1528 | * FIXME: May deadlock here when called from the | 1531 | read_unlock(&dev_priv->resource_lock); |
1529 | * command parsing code. | 1532 | goto out_bad_resource; |
1530 | */ | 1533 | } |
1531 | 1534 | ||
1535 | kref_get(&res->kref); | ||
1536 | read_unlock(&dev_priv->resource_lock); | ||
1537 | |||
1538 | *p_res = res; | ||
1539 | ret = 0; | ||
1540 | |||
1541 | out_bad_resource: | ||
1532 | ttm_base_object_unref(&base); | 1542 | ttm_base_object_unref(&base); |
1543 | |||
1544 | return ret; | ||
1545 | } | ||
1546 | |||
1547 | /** | ||
1548 | * Helper function that looks up either a surface or a dmabuf. | ||
1549 | * | ||
1550 | * The pointers pointed at by out_surf and out_buf need to be NULL. | ||
1551 | */ | ||
1552 | int vmw_user_lookup_handle(struct vmw_private *dev_priv, | ||
1553 | struct ttm_object_file *tfile, | ||
1554 | uint32_t handle, | ||
1555 | struct vmw_surface **out_surf, | ||
1556 | struct vmw_dma_buffer **out_buf) | ||
1557 | { | ||
1558 | struct vmw_resource *res; | ||
1559 | int ret; | ||
1560 | |||
1561 | BUG_ON(*out_surf || *out_buf); | ||
1562 | |||
1563 | ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, | ||
1564 | user_surface_converter, | ||
1565 | &res); | ||
1566 | if (!ret) { | ||
1567 | *out_surf = vmw_res_to_srf(res); | ||
1568 | return 0; | ||
1569 | } | ||
1570 | |||
1571 | *out_surf = NULL; | ||
1572 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); | ||
1533 | return ret; | 1573 | return ret; |
1534 | } | 1574 | } |
1535 | 1575 | ||
@@ -1558,7 +1598,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
1558 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); | 1598 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); |
1559 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | 1599 | memset(vmw_bo, 0, sizeof(*vmw_bo)); |
1560 | 1600 | ||
1561 | INIT_LIST_HEAD(&vmw_bo->validate_list); | 1601 | INIT_LIST_HEAD(&vmw_bo->res_list); |
1562 | 1602 | ||
1563 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 1603 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
1564 | ttm_bo_type_device, placement, | 1604 | ttm_bo_type_device, placement, |
@@ -1590,6 +1630,59 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | |||
1590 | ttm_bo_unref(&bo); | 1630 | ttm_bo_unref(&bo); |
1591 | } | 1631 | } |
1592 | 1632 | ||
1633 | /** | ||
1634 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer | ||
1635 | * | ||
1636 | * @dev_priv: Pointer to a struct device private. | ||
1637 | * @tfile: Pointer to a struct ttm_object_file on which to register the user | ||
1638 | * object. | ||
1639 | * @size: Size of the dma buffer. | ||
1640 | * @shareable: Boolean whether the buffer is shareable with other open files. | ||
1641 | * @handle: Pointer to where the handle value should be assigned. | ||
1642 | * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer | ||
1643 | * should be assigned. | ||
1644 | */ | ||
1645 | int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
1646 | struct ttm_object_file *tfile, | ||
1647 | uint32_t size, | ||
1648 | bool shareable, | ||
1649 | uint32_t *handle, | ||
1650 | struct vmw_dma_buffer **p_dma_buf) | ||
1651 | { | ||
1652 | struct vmw_user_dma_buffer *user_bo; | ||
1653 | struct ttm_buffer_object *tmp; | ||
1654 | int ret; | ||
1655 | |||
1656 | user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); | ||
1657 | if (unlikely(user_bo == NULL)) { | ||
1658 | DRM_ERROR("Failed to allocate a buffer.\n"); | ||
1659 | return -ENOMEM; | ||
1660 | } | ||
1661 | |||
1662 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, | ||
1663 | &vmw_vram_sys_placement, true, | ||
1664 | &vmw_user_dmabuf_destroy); | ||
1665 | if (unlikely(ret != 0)) | ||
1666 | return ret; | ||
1667 | |||
1668 | tmp = ttm_bo_reference(&user_bo->dma.base); | ||
1669 | ret = ttm_base_object_init(tfile, | ||
1670 | &user_bo->base, | ||
1671 | shareable, | ||
1672 | ttm_buffer_type, | ||
1673 | &vmw_user_dmabuf_release, NULL); | ||
1674 | if (unlikely(ret != 0)) { | ||
1675 | ttm_bo_unref(&tmp); | ||
1676 | goto out_no_base_object; | ||
1677 | } | ||
1678 | |||
1679 | *p_dma_buf = &user_bo->dma; | ||
1680 | *handle = user_bo->base.hash.key; | ||
1681 | |||
1682 | out_no_base_object: | ||
1683 | return ret; | ||
1684 | } | ||
1685 | |||
1593 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 1686 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
1594 | struct drm_file *file_priv) | 1687 | struct drm_file *file_priv) |
1595 | { | 1688 | { |
@@ -1598,44 +1691,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
1598 | (union drm_vmw_alloc_dmabuf_arg *)data; | 1691 | (union drm_vmw_alloc_dmabuf_arg *)data; |
1599 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; | 1692 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; |
1600 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; | 1693 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; |
1601 | struct vmw_user_dma_buffer *vmw_user_bo; | 1694 | struct vmw_dma_buffer *dma_buf; |
1602 | struct ttm_buffer_object *tmp; | 1695 | uint32_t handle; |
1603 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 1696 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
1604 | int ret; | 1697 | int ret; |
1605 | 1698 | ||
1606 | vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); | ||
1607 | if (unlikely(vmw_user_bo == NULL)) | ||
1608 | return -ENOMEM; | ||
1609 | |||
1610 | ret = ttm_read_lock(&vmaster->lock, true); | 1699 | ret = ttm_read_lock(&vmaster->lock, true); |
1611 | if (unlikely(ret != 0)) { | 1700 | if (unlikely(ret != 0)) |
1612 | kfree(vmw_user_bo); | ||
1613 | return ret; | 1701 | return ret; |
1614 | } | ||
1615 | 1702 | ||
1616 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | 1703 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
1617 | &vmw_vram_sys_placement, true, | 1704 | req->size, false, &handle, &dma_buf); |
1618 | &vmw_user_dmabuf_destroy); | ||
1619 | if (unlikely(ret != 0)) | 1705 | if (unlikely(ret != 0)) |
1620 | goto out_no_dmabuf; | 1706 | goto out_no_dmabuf; |
1621 | 1707 | ||
1622 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | 1708 | rep->handle = handle; |
1623 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | 1709 | rep->map_handle = dma_buf->base.addr_space_offset; |
1624 | &vmw_user_bo->base, | 1710 | rep->cur_gmr_id = handle; |
1625 | false, | 1711 | rep->cur_gmr_offset = 0; |
1626 | ttm_buffer_type, | 1712 | |
1627 | &vmw_user_dmabuf_release, NULL); | 1713 | vmw_dmabuf_unreference(&dma_buf); |
1628 | if (unlikely(ret != 0)) | ||
1629 | goto out_no_base_object; | ||
1630 | else { | ||
1631 | rep->handle = vmw_user_bo->base.hash.key; | ||
1632 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | ||
1633 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | ||
1634 | rep->cur_gmr_offset = 0; | ||
1635 | } | ||
1636 | 1714 | ||
1637 | out_no_base_object: | ||
1638 | ttm_bo_unref(&tmp); | ||
1639 | out_no_dmabuf: | 1715 | out_no_dmabuf: |
1640 | ttm_read_unlock(&vmaster->lock); | 1716 | ttm_read_unlock(&vmaster->lock); |
1641 | 1717 | ||
@@ -1653,27 +1729,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | |||
1653 | TTM_REF_USAGE); | 1729 | TTM_REF_USAGE); |
1654 | } | 1730 | } |
1655 | 1731 | ||
1656 | uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | ||
1657 | uint32_t cur_validate_node) | ||
1658 | { | ||
1659 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
1660 | |||
1661 | if (likely(vmw_bo->on_validate_list)) | ||
1662 | return vmw_bo->cur_validate_node; | ||
1663 | |||
1664 | vmw_bo->cur_validate_node = cur_validate_node; | ||
1665 | vmw_bo->on_validate_list = true; | ||
1666 | |||
1667 | return cur_validate_node; | ||
1668 | } | ||
1669 | |||
1670 | void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) | ||
1671 | { | ||
1672 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
1673 | |||
1674 | vmw_bo->on_validate_list = false; | ||
1675 | } | ||
1676 | |||
1677 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 1732 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
1678 | uint32_t handle, struct vmw_dma_buffer **out) | 1733 | uint32_t handle, struct vmw_dma_buffer **out) |
1679 | { | 1734 | { |
@@ -1702,6 +1757,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
1702 | return 0; | 1757 | return 0; |
1703 | } | 1758 | } |
1704 | 1759 | ||
1760 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
1761 | struct vmw_dma_buffer *dma_buf) | ||
1762 | { | ||
1763 | struct vmw_user_dma_buffer *user_bo; | ||
1764 | |||
1765 | if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) | ||
1766 | return -EINVAL; | ||
1767 | |||
1768 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | ||
1769 | return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); | ||
1770 | } | ||
1771 | |||
1705 | /* | 1772 | /* |
1706 | * Stream management | 1773 | * Stream management |
1707 | */ | 1774 | */ |
@@ -1726,8 +1793,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
1726 | struct vmw_resource *res = &stream->res; | 1793 | struct vmw_resource *res = &stream->res; |
1727 | int ret; | 1794 | int ret; |
1728 | 1795 | ||
1729 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | 1796 | ret = vmw_resource_init(dev_priv, res, false, res_free, |
1730 | VMW_RES_STREAM, false, res_free, NULL); | 1797 | &vmw_stream_func); |
1731 | 1798 | ||
1732 | if (unlikely(ret != 0)) { | 1799 | if (unlikely(ret != 0)) { |
1733 | if (res_free == NULL) | 1800 | if (res_free == NULL) |
@@ -1749,10 +1816,6 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
1749 | return 0; | 1816 | return 0; |
1750 | } | 1817 | } |
1751 | 1818 | ||
1752 | /** | ||
1753 | * User-space context management: | ||
1754 | */ | ||
1755 | |||
1756 | static void vmw_user_stream_free(struct vmw_resource *res) | 1819 | static void vmw_user_stream_free(struct vmw_resource *res) |
1757 | { | 1820 | { |
1758 | struct vmw_user_stream *stream = | 1821 | struct vmw_user_stream *stream = |
@@ -1788,9 +1851,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | |||
1788 | struct vmw_user_stream *stream; | 1851 | struct vmw_user_stream *stream; |
1789 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | 1852 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; |
1790 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1853 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
1854 | struct idr *idr = &dev_priv->res_idr[vmw_res_stream]; | ||
1791 | int ret = 0; | 1855 | int ret = 0; |
1792 | 1856 | ||
1793 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); | 1857 | |
1858 | res = vmw_resource_lookup(dev_priv, idr, arg->stream_id); | ||
1794 | if (unlikely(res == NULL)) | 1859 | if (unlikely(res == NULL)) |
1795 | return -EINVAL; | 1860 | return -EINVAL; |
1796 | 1861 | ||
@@ -1891,7 +1956,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
1891 | struct vmw_resource *res; | 1956 | struct vmw_resource *res; |
1892 | int ret; | 1957 | int ret; |
1893 | 1958 | ||
1894 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); | 1959 | res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream], |
1960 | *inout_id); | ||
1895 | if (unlikely(res == NULL)) | 1961 | if (unlikely(res == NULL)) |
1896 | return -EINVAL; | 1962 | return -EINVAL; |
1897 | 1963 | ||
@@ -1986,3 +2052,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv, | |||
1986 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | 2052 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, |
1987 | handle, TTM_REF_USAGE); | 2053 | handle, TTM_REF_USAGE); |
1988 | } | 2054 | } |
2055 | |||
2056 | /** | ||
2057 | * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. | ||
2058 | * | ||
2059 | * @res: The resource for which to allocate a backup buffer. | ||
2060 | * @interruptible: Whether any sleeps during allocation should be | ||
2061 | * performed while interruptible. | ||
2062 | */ | ||
2063 | static int vmw_resource_buf_alloc(struct vmw_resource *res, | ||
2064 | bool interruptible) | ||
2065 | { | ||
2066 | unsigned long size = | ||
2067 | (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
2068 | struct vmw_dma_buffer *backup; | ||
2069 | int ret; | ||
2070 | |||
2071 | if (likely(res->backup)) { | ||
2072 | BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); | ||
2073 | return 0; | ||
2074 | } | ||
2075 | |||
2076 | backup = kzalloc(sizeof(*backup), GFP_KERNEL); | ||
2077 | if (unlikely(backup == NULL)) | ||
2078 | return -ENOMEM; | ||
2079 | |||
2080 | ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, | ||
2081 | res->func->backup_placement, | ||
2082 | interruptible, | ||
2083 | &vmw_dmabuf_bo_free); | ||
2084 | if (unlikely(ret != 0)) | ||
2085 | goto out_no_dmabuf; | ||
2086 | |||
2087 | res->backup = backup; | ||
2088 | |||
2089 | out_no_dmabuf: | ||
2090 | return ret; | ||
2091 | } | ||
2092 | |||
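Note: the backup buffer is sized by rounding res->backup_size up to a whole number of pages with the usual (size + PAGE_SIZE - 1) & PAGE_MASK idiom (PAGE_MASK being ~(PAGE_SIZE - 1) on Linux). A stand-alone user-space model of that arithmetic, assuming 4 KiB pages:

	#include <stdio.h>

	#define MODEL_PAGE_SIZE 4096UL			/* assumption: 4 KiB pages */
	#define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))

	static unsigned long round_up_to_page(unsigned long backup_size)
	{
		return (backup_size + MODEL_PAGE_SIZE - 1) & MODEL_PAGE_MASK;
	}

	int main(void)
	{
		printf("%lu -> %lu\n", 5000UL, round_up_to_page(5000UL));  /* 5000 -> 8192 */
		printf("%lu -> %lu\n", 8192UL, round_up_to_page(8192UL));  /* 8192 -> 8192 */
		return 0;
	}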
2093 | /** | ||
2094 | * vmw_resource_do_validate - Make a resource up-to-date and visible | ||
2095 | * to the device. | ||
2096 | * | ||
2097 | * @res: The resource to make visible to the device. | ||
2098 | * @val_buf: Information about a buffer possibly | ||
2099 | * containing backup data if a bind operation is needed. | ||
2100 | * | ||
2101 | * On hardware resource shortage, this function returns -EBUSY and | ||
2102 | * the call should be retried once resources have been freed up. | ||
2103 | */ | ||
2104 | static int vmw_resource_do_validate(struct vmw_resource *res, | ||
2105 | struct ttm_validate_buffer *val_buf) | ||
2106 | { | ||
2107 | int ret = 0; | ||
2108 | const struct vmw_res_func *func = res->func; | ||
2109 | |||
2110 | if (unlikely(res->id == -1)) { | ||
2111 | ret = func->create(res); | ||
2112 | if (unlikely(ret != 0)) | ||
2113 | return ret; | ||
2114 | } | ||
2115 | |||
2116 | if (func->bind && | ||
2117 | ((func->needs_backup && list_empty(&res->mob_head) && | ||
2118 | val_buf->bo != NULL) || | ||
2119 | (!func->needs_backup && val_buf->bo != NULL))) { | ||
2120 | ret = func->bind(res, val_buf); | ||
2121 | if (unlikely(ret != 0)) | ||
2122 | goto out_bind_failed; | ||
2123 | if (func->needs_backup) | ||
2124 | list_add_tail(&res->mob_head, &res->backup->res_list); | ||
2125 | } | ||
2126 | |||
2127 | /* | ||
2128 | * Only do this on write operations, and move to | ||
2129 | * vmw_resource_unreserve if it can be called after | ||
2130 | * backup buffers have been unreserved. Otherwise | ||
2131 | * sort out locking. | ||
2132 | */ | ||
2133 | res->res_dirty = true; | ||
2134 | |||
2135 | return 0; | ||
2136 | |||
2137 | out_bind_failed: | ||
2138 | func->destroy(res); | ||
2139 | |||
2140 | return ret; | ||
2141 | } | ||
2142 | |||
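Note: the bind step in vmw_resource_do_validate() only runs when the backend provides a bind callback and either (a) the resource needs persistent backup, currently has no bound backup (mob_head empty) and a backup bo was supplied, or (b) it does not need persistent backup but a backup bo was supplied anyway (e.g. dirty data to upload). Restated as a small stand-alone predicate (the names here are ours, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	static bool should_bind(bool have_bind_op, bool needs_backup,
				bool mob_list_empty, bool have_backup_bo)
	{
		return have_bind_op &&
		       ((needs_backup && mob_list_empty && have_backup_bo) ||
			(!needs_backup && have_backup_bo));
	}

	int main(void)
	{
		printf("%d\n", should_bind(true, true, true, true));   /* 1: first-time bind */
		printf("%d\n", should_bind(true, true, false, true));  /* 0: already bound */
		printf("%d\n", should_bind(true, false, true, true));  /* 1: legacy-style upload */
		return 0;
	}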
2143 | /** | ||
2144 | * vmw_resource_unreserve - Unreserve a resource previously reserved for | ||
2145 | * command submission. | ||
2146 | * | ||
2147 | * @res: Pointer to the struct vmw_resource to unreserve. | ||
2148 | * @new_backup: Pointer to new backup buffer if command submission | ||
2149 | * switched. | ||
2150 | * @new_backup_offset: New backup offset if @new_backup is !NULL. | ||
2151 | * | ||
2152 | * Currently unreserving a resource means putting it back on the device's | ||
2153 | * resource lru list, so that it can be evicted if necessary. | ||
2154 | */ | ||
2155 | void vmw_resource_unreserve(struct vmw_resource *res, | ||
2156 | struct vmw_dma_buffer *new_backup, | ||
2157 | unsigned long new_backup_offset) | ||
2158 | { | ||
2159 | struct vmw_private *dev_priv = res->dev_priv; | ||
2160 | |||
2161 | if (!list_empty(&res->lru_head)) | ||
2162 | return; | ||
2163 | |||
2164 | if (new_backup && new_backup != res->backup) { | ||
2165 | |||
2166 | if (res->backup) { | ||
2167 | BUG_ON(atomic_read(&res->backup->base.reserved) == 0); | ||
2168 | list_del_init(&res->mob_head); | ||
2169 | vmw_dmabuf_unreference(&res->backup); | ||
2170 | } | ||
2171 | |||
2172 | res->backup = vmw_dmabuf_reference(new_backup); | ||
2173 | BUG_ON(atomic_read(&new_backup->base.reserved) == 0); | ||
2174 | list_add_tail(&res->mob_head, &new_backup->res_list); | ||
2175 | } | ||
2176 | if (new_backup) | ||
2177 | res->backup_offset = new_backup_offset; | ||
2178 | |||
2179 | if (!res->func->may_evict) | ||
2180 | return; | ||
2181 | |||
2182 | write_lock(&dev_priv->resource_lock); | ||
2183 | list_add_tail(&res->lru_head, | ||
2184 | &res->dev_priv->res_lru[res->func->res_type]); | ||
2185 | write_unlock(&dev_priv->resource_lock); | ||
2186 | } | ||
2187 | |||
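Note: together with vmw_resource_reserve() and vmw_resource_validate() below, this gives the per-resource lifecycle used around command submission. A hypothetical caller sketch (not from this patch; error unwinding is omitted, and the execbuf path additionally reserves and validates any backup buffers, which is skipped here):

	static int example_submit_one(struct vmw_resource *res)
	{
		int ret;

		ret = vmw_resource_reserve(res, false);	/* off the LRU, backup allocated */
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_resource_validate(res);	/* create/bind; evicts peers on -EBUSY */
		if (unlikely(ret != 0))
			return ret;

		/* ... reserve FIFO space and emit commands referencing res->id ... */

		vmw_resource_unreserve(res, NULL, 0);	/* NULL: backup buffer unchanged */
		return 0;
	}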
2188 | /** | ||
2189 | * vmw_resource_check_buffer - Check whether a backup buffer is needed | ||
2190 | * for a resource and in that case, allocate | ||
2191 | * one, reserve and validate it. | ||
2192 | * | ||
2193 | * @res: The resource for which to allocate a backup buffer. | ||
2194 | * @interruptible: Whether any sleeps during allocation should be | ||
2195 | * performed while interruptible. | ||
2196 | * @val_buf: On successful return contains data about the | ||
2197 | * reserved and validated backup buffer. | ||
2198 | */ | ||
2199 | int vmw_resource_check_buffer(struct vmw_resource *res, | ||
2200 | bool interruptible, | ||
2201 | struct ttm_validate_buffer *val_buf) | ||
2202 | { | ||
2203 | struct list_head val_list; | ||
2204 | bool backup_dirty = false; | ||
2205 | int ret; | ||
2206 | |||
2207 | if (unlikely(res->backup == NULL)) { | ||
2208 | ret = vmw_resource_buf_alloc(res, interruptible); | ||
2209 | if (unlikely(ret != 0)) | ||
2210 | return ret; | ||
2211 | } | ||
2212 | |||
2213 | INIT_LIST_HEAD(&val_list); | ||
2214 | val_buf->bo = ttm_bo_reference(&res->backup->base); | ||
2215 | list_add_tail(&val_buf->head, &val_list); | ||
2216 | ret = ttm_eu_reserve_buffers(&val_list); | ||
2217 | if (unlikely(ret != 0)) | ||
2218 | goto out_no_reserve; | ||
2219 | |||
2220 | if (res->func->needs_backup && list_empty(&res->mob_head)) | ||
2221 | return 0; | ||
2222 | |||
2223 | backup_dirty = res->backup_dirty; | ||
2224 | ret = ttm_bo_validate(&res->backup->base, | ||
2225 | res->func->backup_placement, | ||
2226 | true, false, false); | ||
2227 | |||
2228 | if (unlikely(ret != 0)) | ||
2229 | goto out_no_validate; | ||
2230 | |||
2231 | return 0; | ||
2232 | |||
2233 | out_no_validate: | ||
2234 | ttm_eu_backoff_reservation(&val_list); | ||
2235 | out_no_reserve: | ||
2236 | ttm_bo_unref(&val_buf->bo); | ||
2237 | if (backup_dirty) | ||
2238 | vmw_dmabuf_unreference(&res->backup); | ||
2239 | |||
2240 | return ret; | ||
2241 | } | ||
2242 | |||
2243 | /** | ||
2244 | * vmw_resource_reserve - Reserve a resource for command submission | ||
2245 | * | ||
2246 | * @res: The resource to reserve. | ||
2247 | * | ||
2248 | * This function takes the resource off the LRU list and makes sure | ||
2249 | * a backup buffer is present for guest-backed resources. However, | ||
2250 | * the buffer may not be bound to the resource at this point. | ||
2251 | * | ||
2252 | */ | ||
2253 | int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) | ||
2254 | { | ||
2255 | struct vmw_private *dev_priv = res->dev_priv; | ||
2256 | int ret; | ||
2257 | |||
2258 | write_lock(&dev_priv->resource_lock); | ||
2259 | list_del_init(&res->lru_head); | ||
2260 | write_unlock(&dev_priv->resource_lock); | ||
2261 | |||
2262 | if (res->func->needs_backup && res->backup == NULL && | ||
2263 | !no_backup) { | ||
2264 | ret = vmw_resource_buf_alloc(res, true); | ||
2265 | if (unlikely(ret != 0)) | ||
2266 | return ret; | ||
2267 | } | ||
2268 | |||
2269 | return 0; | ||
2270 | } | ||
2271 | |||
2272 | /** | ||
2273 | * vmw_resource_backoff_reservation - Unreserve and unreference a | ||
2274 | * backup buffer. | ||
2275 | * | ||
2276 | * @val_buf: Backup buffer information. | ||
2277 | */ | ||
2278 | void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) | ||
2279 | { | ||
2280 | struct list_head val_list; | ||
2281 | |||
2282 | if (likely(val_buf->bo == NULL)) | ||
2283 | return; | ||
2284 | |||
2285 | INIT_LIST_HEAD(&val_list); | ||
2286 | list_add_tail(&val_buf->head, &val_list); | ||
2287 | ttm_eu_backoff_reservation(&val_list); | ||
2288 | ttm_bo_unref(&val_buf->bo); | ||
2289 | } | ||
2290 | |||
2291 | /** | ||
2292 | * vmw_resource_do_evict - Evict a resource, and transfer its data | ||
2293 | * to a backup buffer. | ||
2294 | * | ||
2295 | * @res: The resource to evict. | ||
2296 | */ | ||
2297 | int vmw_resource_do_evict(struct vmw_resource *res) | ||
2298 | { | ||
2299 | struct ttm_validate_buffer val_buf; | ||
2300 | const struct vmw_res_func *func = res->func; | ||
2301 | int ret; | ||
2302 | |||
2303 | BUG_ON(!func->may_evict); | ||
2304 | |||
2305 | val_buf.bo = NULL; | ||
2306 | ret = vmw_resource_check_buffer(res, true, &val_buf); | ||
2307 | if (unlikely(ret != 0)) | ||
2308 | return ret; | ||
2309 | |||
2310 | if (unlikely(func->unbind != NULL && | ||
2311 | (!func->needs_backup || !list_empty(&res->mob_head)))) { | ||
2312 | ret = func->unbind(res, res->res_dirty, &val_buf); | ||
2313 | if (unlikely(ret != 0)) | ||
2314 | goto out_no_unbind; | ||
2315 | list_del_init(&res->mob_head); | ||
2316 | } | ||
2317 | ret = func->destroy(res); | ||
2318 | res->backup_dirty = true; | ||
2319 | res->res_dirty = false; | ||
2320 | out_no_unbind: | ||
2321 | vmw_resource_backoff_reservation(&val_buf); | ||
2322 | |||
2323 | return ret; | ||
2324 | } | ||
2325 | |||
2326 | |||
2327 | /** | ||
2328 | * vmw_resource_validate - Make a resource up-to-date and visible | ||
2329 | * to the device. | ||
2330 | * | ||
2331 | * @res: The resource to make visible to the device. | ||
2332 | * | ||
2333 | * On successful return, any backup DMA buffer pointed to by @res->backup will | ||
2334 | * be reserved and validated. | ||
2335 | * On hardware resource shortage, this function will repeatedly evict | ||
2336 | * resources of the same type until the validation succeeds. | ||
2337 | */ | ||
2338 | int vmw_resource_validate(struct vmw_resource *res) | ||
2339 | { | ||
2340 | int ret; | ||
2341 | struct vmw_resource *evict_res; | ||
2342 | struct vmw_private *dev_priv = res->dev_priv; | ||
2343 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; | ||
2344 | struct ttm_validate_buffer val_buf; | ||
2345 | |||
2346 | if (likely(!res->func->may_evict)) | ||
2347 | return 0; | ||
2348 | |||
2349 | val_buf.bo = NULL; | ||
2350 | if (res->backup) | ||
2351 | val_buf.bo = &res->backup->base; | ||
2352 | do { | ||
2353 | ret = vmw_resource_do_validate(res, &val_buf); | ||
2354 | if (likely(ret != -EBUSY)) | ||
2355 | break; | ||
2356 | |||
2357 | write_lock(&dev_priv->resource_lock); | ||
2358 | if (list_empty(lru_list) || !res->func->may_evict) { | ||
2359 | DRM_ERROR("Out of device id entries " | ||
2360 | "for %s.\n", res->func->type_name); | ||
2361 | ret = -EBUSY; | ||
2362 | write_unlock(&dev_priv->resource_lock); | ||
2363 | break; | ||
2364 | } | ||
2365 | |||
2366 | evict_res = vmw_resource_reference | ||
2367 | (list_first_entry(lru_list, struct vmw_resource, | ||
2368 | lru_head)); | ||
2369 | list_del_init(&evict_res->lru_head); | ||
2370 | |||
2371 | write_unlock(&dev_priv->resource_lock); | ||
2372 | vmw_resource_do_evict(evict_res); | ||
2373 | vmw_resource_unreference(&evict_res); | ||
2374 | } while (1); | ||
2375 | |||
2376 | if (unlikely(ret != 0)) | ||
2377 | goto out_no_validate; | ||
2378 | else if (!res->func->needs_backup && res->backup) { | ||
2379 | list_del_init(&res->mob_head); | ||
2380 | vmw_dmabuf_unreference(&res->backup); | ||
2381 | } | ||
2382 | |||
2383 | return 0; | ||
2384 | |||
2385 | out_no_validate: | ||
2386 | return ret; | ||
2387 | } | ||
2388 | |||
2389 | /** | ||
2390 | * vmw_fence_single_bo - Utility function to fence a single TTM buffer | ||
2391 | * object without unreserving it. | ||
2392 | * | ||
2393 | * @bo: Pointer to the struct ttm_buffer_object to fence. | ||
2394 | * @fence: Pointer to the fence. If NULL, this function will | ||
2395 | * insert a fence into the command stream. | ||
2396 | * | ||
2397 | * Contrary to the ttm_eu version of this function, it takes only | ||
2398 | * a single buffer object instead of a list, and it also doesn't | ||
2399 | * unreserve the buffer object, which needs to be done separately. | ||
2400 | */ | ||
2401 | void vmw_fence_single_bo(struct ttm_buffer_object *bo, | ||
2402 | struct vmw_fence_obj *fence) | ||
2403 | { | ||
2404 | struct ttm_bo_device *bdev = bo->bdev; | ||
2405 | struct ttm_bo_driver *driver = bdev->driver; | ||
2406 | struct vmw_fence_obj *old_fence_obj; | ||
2407 | struct vmw_private *dev_priv = | ||
2408 | container_of(bdev, struct vmw_private, bdev); | ||
2409 | |||
2410 | if (fence == NULL) | ||
2411 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
2412 | else | ||
2413 | driver->sync_obj_ref(fence); | ||
2414 | |||
2415 | spin_lock(&bdev->fence_lock); | ||
2416 | |||
2417 | old_fence_obj = bo->sync_obj; | ||
2418 | bo->sync_obj = fence; | ||
2419 | |||
2420 | spin_unlock(&bdev->fence_lock); | ||
2421 | |||
2422 | if (old_fence_obj) | ||
2423 | vmw_fence_obj_unreference(&old_fence_obj); | ||
2424 | } | ||
2425 | |||
2426 | /** | ||
2427 | * vmw_resource_move_notify - TTM move_notify_callback | ||
2428 | * | ||
2429 | * @bo: The TTM buffer object about to move. | ||
2430 | * @mem: The struct ttm_mem_reg indicating to which memory | ||
2431 | * region the move is taking place. | ||
2432 | * | ||
2433 | * For now does nothing. | ||
2434 | */ | ||
2435 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, | ||
2436 | struct ttm_mem_reg *mem) | ||
2437 | { | ||
2438 | } | ||
2439 | |||
2440 | /** | ||
2441 | * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. | ||
2442 | * | ||
2443 | * @res: The resource being queried. | ||
2444 | */ | ||
2445 | bool vmw_resource_needs_backup(const struct vmw_resource *res) | ||
2446 | { | ||
2447 | return res->func->needs_backup; | ||
2448 | } | ||
2449 | |||
2450 | /** | ||
2451 | * vmw_resource_evict_type - Evict all resources of a specific type | ||
2452 | * | ||
2453 | * @dev_priv: Pointer to a device private struct | ||
2454 | * @type: The resource type to evict | ||
2455 | * | ||
2456 | * To avoid starvation due to thrashing, or as part of the hibernation sequence, | ||
2457 | * evict all evictable resources of a specific type. | ||
2458 | */ | ||
2459 | static void vmw_resource_evict_type(struct vmw_private *dev_priv, | ||
2460 | enum vmw_res_type type) | ||
2461 | { | ||
2462 | struct list_head *lru_list = &dev_priv->res_lru[type]; | ||
2463 | struct vmw_resource *evict_res; | ||
2464 | |||
2465 | do { | ||
2466 | write_lock(&dev_priv->resource_lock); | ||
2467 | |||
2468 | if (list_empty(lru_list)) | ||
2469 | goto out_unlock; | ||
2470 | |||
2471 | evict_res = vmw_resource_reference( | ||
2472 | list_first_entry(lru_list, struct vmw_resource, | ||
2473 | lru_head)); | ||
2474 | list_del_init(&evict_res->lru_head); | ||
2475 | write_unlock(&dev_priv->resource_lock); | ||
2476 | vmw_resource_do_evict(evict_res); | ||
2477 | vmw_resource_unreference(&evict_res); | ||
2478 | } while (1); | ||
2479 | |||
2480 | out_unlock: | ||
2481 | write_unlock(&dev_priv->resource_lock); | ||
2482 | } | ||
2483 | |||
2484 | /** | ||
2485 | * vmw_resource_evict_all - Evict all evictable resources | ||
2486 | * | ||
2487 | * @dev_priv: Pointer to a device private struct | ||
2488 | * | ||
2489 | * To avoid starvation due to thrashing, or as part of the hibernation sequence, | ||
2490 | * evict all evictable resources. In particular this means that all | ||
2491 | * guest-backed resources that are registered with the device are | ||
2492 | * evicted and the OTable becomes clean. | ||
2493 | */ | ||
2494 | void vmw_resource_evict_all(struct vmw_private *dev_priv) | ||
2495 | { | ||
2496 | enum vmw_res_type type; | ||
2497 | |||
2498 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
2499 | |||
2500 | for (type = 0; type < vmw_res_max; ++type) | ||
2501 | vmw_resource_evict_type(dev_priv, type); | ||
2502 | |||
2503 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
2504 | } | ||