 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |  14
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  22
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  26
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  19
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 836
 5 files changed, 835 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 98a5d7e90546..5a72ed908232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -60,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = {
         VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };
 
+static uint32_t gmr_vram_placement_flags[] = {
+        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
 struct ttm_placement vmw_vram_gmr_placement = {
         .fpfn = 0,
         .lpfn = 0,
@@ -125,6 +130,15 @@ struct ttm_placement vmw_evictable_placement = {
         .busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_srf_placement = {
+        .fpfn = 0,
+        .lpfn = 0,
+        .num_placement = 1,
+        .num_busy_placement = 2,
+        .placement = &gmr_placement_flags,
+        .busy_placement = gmr_vram_placement_flags
+};
+
 struct vmw_ttm_backend {
         struct ttm_backend backend;
         struct page **pages;
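The new vmw_srf_placement prefers a single GMR placement, with GMR-then-VRAM as the busy (fallback) placements. A minimal sketch of how a surface backup buffer is validated against it, mirroring the call pattern vmw_surface_do_validate() adds in vmwgfx_resource.c below (reservation and error handling elided):

        /* Sketch: place the backup buffer object according to vmw_srf_placement. */
        ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
                              true, false, false);
        if (unlikely(ret != 0))
                goto out_no_validate;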
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b88104144ca..a98ee19bd682 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -402,6 +402,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         init_waitqueue_head(&dev_priv->fifo_queue);
         dev_priv->fence_queue_waiters = 0;
         atomic_set(&dev_priv->fifo_queue_waiters, 0);
+        INIT_LIST_HEAD(&dev_priv->surface_lru);
+        dev_priv->used_memory_size = 0;
 
         dev_priv->io_start = pci_resource_start(dev->pdev, 0);
         dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -422,6 +424,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
         dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
+        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
         if (dev_priv->capabilities & SVGA_CAP_GMR) {
                 dev_priv->max_gmr_descriptors =
                         vmw_read(dev_priv,
@@ -434,13 +440,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                         vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                 dev_priv->memory_size =
                         vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+                dev_priv->memory_size -= dev_priv->vram_size;
+        } else {
+                /*
+                 * An arbitrary limit of 512MiB on surface
+                 * memory. But all HWV8 hardware supports GMR2.
+                 */
+                dev_priv->memory_size = 512*1024*1024;
         }
 
-        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
-        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
-        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
-        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
-
         mutex_unlock(&dev_priv->hw_mutex);
 
         vmw_print_capabilities(dev_priv->capabilities);
@@ -454,8 +462,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                 DRM_INFO("Max number of GMR pages is %u\n",
                          (unsigned)dev_priv->max_gmr_pages);
-                DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
-                         (unsigned)dev_priv->memory_size);
+                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+                         (unsigned)dev_priv->memory_size / 1024);
         }
         DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                  dev_priv->vram_start, dev_priv->vram_size / 1024);
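The two new fields act as a simple admission control for surface memory: memory_size is the budget (dedicated hypervisor memory minus VRAM, or the 512 MiB fallback) and used_memory_size tracks what hw surfaces currently consume. A sketch of the check this enables, mirroring the test added to vmw_surface_do_validate() in vmwgfx_resource.c below:

        /* Sketch: refuse to create another hw surface once the budget is spent. */
        if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
                     dev_priv->memory_size))
                return -EBUSY;  /* caller evicts an LRU surface and retries */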
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 75e6d10281a8..ee564f0a4fb0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -79,6 +79,7 @@ struct vmw_resource {
         int id;
         enum ttm_object_type res_type;
         bool avail;
+        void (*remove_from_lists) (struct vmw_resource *res);
         void (*hw_destroy) (struct vmw_resource *res);
         void (*res_free) (struct vmw_resource *res);
         struct list_head validate_head;
@@ -99,9 +100,11 @@ struct vmw_cursor_snooper {
 };
 
 struct vmw_framebuffer;
+struct vmw_surface_offset;
 
 struct vmw_surface {
         struct vmw_resource res;
+        struct list_head lru_head; /* Protected by the resource lock */
         uint32_t flags;
         uint32_t format;
         uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
@@ -112,6 +115,9 @@ struct vmw_surface {
 
         /* TODO so far just a extra pointer */
         struct vmw_cursor_snooper snooper;
+        struct ttm_buffer_object *backup;
+        struct vmw_surface_offset *offsets;
+        uint32_t backup_size;
 };
 
 struct vmw_marker_queue {
@@ -310,6 +316,16 @@ struct vmw_private {
         struct ttm_buffer_object *pinned_bo;
         uint32_t query_cid;
         bool dummy_query_bo_pinned;
+
+        /*
+         * Surface swapping. The "surface_lru" list is protected by the
+         * resource lock in order to be able to destroy a surface and take
+         * it off the lru atomically. "used_memory_size" is currently
+         * protected by the cmdbuf mutex for simplicity.
+         */
+
+        struct list_head surface_lru;
+        uint32_t used_memory_size;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -389,6 +405,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 extern int vmw_surface_check(struct vmw_private *dev_priv,
                              struct ttm_object_file *tfile,
                              uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+                                struct vmw_surface *srf);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                            struct vmw_dma_buffer *vmw_bo,
@@ -412,6 +430,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t *inout_id,
                                    struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct list_head *list);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -486,6 +505,7 @@ extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 
@@ -508,6 +528,12 @@ extern void
 vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                               bool only_on_cid_match, uint32_t cid);
 
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+                                      struct vmw_private *dev_priv,
+                                      struct vmw_fence_obj **p_fence,
+                                      uint32_t *p_handle);
+
+
 /**
  * IRQs and wating - vmwgfx_irq.c
  */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dfd7fca6b3f7..8a22f9d4a610 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -164,6 +164,14 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                 return ret;
         }
 
+        ret = vmw_surface_validate(dev_priv, srf);
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Could not validate surface.\n");
+                vmw_surface_unreference(&srf);
+                return ret;
+        }
+
         sw_context->last_sid = *sid;
         sw_context->sid_valid = true;
         sw_context->sid_translation = srf->res.id;
@@ -257,6 +265,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                 SVGA3dCmdPresent body;
         } *cmd;
 
+
         cmd = container_of(header, struct vmw_sid_cmd, header);
 
         if (unlikely(!sw_context->kernel)) {
@@ -566,6 +575,13 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                 goto out_no_reloc;
         }
 
+        ret = vmw_surface_validate(dev_priv, srf);
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Could not validate surface.\n");
+                goto out_no_validate;
+        }
+
         /*
          * Patch command stream with device SID.
          */
@@ -579,6 +595,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
         return 0;
 
+out_no_validate:
+        vmw_surface_unreference(&srf);
 out_no_reloc:
         vmw_dmabuf_unreference(&vmw_bo);
         return ret;
@@ -882,6 +900,7 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
         /*
          * Drop references to resources held during command submission.
          */
+        vmw_resource_unreserve(&sw_context->resource_list);
         list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
                                  validate_head) {
                 list_del_init(&res->validate_head);
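Taken together, the execbuf changes give every surface referenced by a submitted command stream a small per-submission lifecycle. An illustrative outline, using only names from this patch:

        /* Sketch: per-submission surface handling. */
        ret = vmw_surface_validate(dev_priv, srf);   /* ensure a hw id; surface leaves the LRU */
        /* ... commands are patched with srf->res.id and submitted ... */
        vmw_resource_unreserve(&sw_context->resource_list); /* surfaces return to the LRU */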
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e0a41818d9d0..93a68a61419d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -61,6 +61,12 @@ struct vmw_user_stream {
         struct vmw_stream stream;
 };
 
+struct vmw_surface_offset {
+        uint32_t face;
+        uint32_t mip;
+        uint32_t bo_offset;
+};
+
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -80,13 +86,36 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
         return res;
 }
 
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set it to -1
+ */
+static void vmw_resource_release_id(struct vmw_resource *res)
+{
+        struct vmw_private *dev_priv = res->dev_priv;
+
+        write_lock(&dev_priv->resource_lock);
+        if (res->id != -1)
+                idr_remove(res->idr, res->id);
+        res->id = -1;
+        write_unlock(&dev_priv->resource_lock);
+}
+
 static void vmw_resource_release(struct kref *kref)
 {
         struct vmw_resource *res =
                 container_of(kref, struct vmw_resource, kref);
         struct vmw_private *dev_priv = res->dev_priv;
+        int id = res->id;
+        struct idr *idr = res->idr;
 
-        idr_remove(res->idr, res->id);
+        res->avail = false;
+        if (res->remove_from_lists != NULL)
+                res->remove_from_lists(res);
         write_unlock(&dev_priv->resource_lock);
 
         if (likely(res->hw_destroy != NULL))
@@ -98,6 +127,9 @@ static void vmw_resource_release(struct kref *kref)
                 kfree(res);
 
         write_lock(&dev_priv->resource_lock);
+
+        if (id != -1)
+                idr_remove(idr, id);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -111,34 +143,61 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
         write_unlock(&dev_priv->resource_lock);
 }
 
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource from the resource manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
+                                 struct vmw_resource *res)
+{
+        int ret;
+
+        BUG_ON(res->id != -1);
+
+        do {
+                if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+                        return -ENOMEM;
+
+                write_lock(&dev_priv->resource_lock);
+                ret = idr_get_new_above(res->idr, res, 1, &res->id);
+                write_unlock(&dev_priv->resource_lock);
+
+        } while (ret == -EAGAIN);
+
+        return ret;
+}
+
+
 static int vmw_resource_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              struct idr *idr,
                              enum ttm_object_type obj_type,
-                             void (*res_free) (struct vmw_resource *res))
+                             bool delay_id,
+                             void (*res_free) (struct vmw_resource *res),
+                             void (*remove_from_lists)
+                             (struct vmw_resource *res))
 {
-        int ret;
-
         kref_init(&res->kref);
         res->hw_destroy = NULL;
         res->res_free = res_free;
+        res->remove_from_lists = remove_from_lists;
         res->res_type = obj_type;
         res->idr = idr;
         res->avail = false;
         res->dev_priv = dev_priv;
         INIT_LIST_HEAD(&res->query_head);
         INIT_LIST_HEAD(&res->validate_head);
-        do {
-                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
-                        return -ENOMEM;
-
-                write_lock(&dev_priv->resource_lock);
-                ret = idr_get_new_above(idr, res, 1, &res->id);
-                write_unlock(&dev_priv->resource_lock);
-
-        } while (ret == -EAGAIN);
-
-        return ret;
+        res->id = -1;
+        if (delay_id)
+                return 0;
+        else
+                return vmw_resource_alloc_id(dev_priv, res);
 }
 
 /**
@@ -227,14 +286,17 @@ static int vmw_context_init(struct vmw_private *dev_priv,
         } *cmd;
 
         ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
-                                VMW_RES_CONTEXT, res_free);
+                                VMW_RES_CONTEXT, false, res_free, NULL);
 
         if (unlikely(ret != 0)) {
-                if (res_free == NULL)
-                        kfree(res);
-                else
-                        res_free(res);
-                return ret;
+                DRM_ERROR("Failed to allocate a resource id.\n");
+                goto out_early;
+        }
+
+        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+                DRM_ERROR("Out of hw context ids.\n");
+                vmw_resource_unreference(&res);
+                return -ENOMEM;
         }
 
         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
@@ -252,6 +314,13 @@ static int vmw_context_init(struct vmw_private *dev_priv,
         (void) vmw_3d_resource_inc(dev_priv, false);
         vmw_resource_activate(res, vmw_hw_context_destroy);
         return 0;
+
+out_early:
+        if (res_free == NULL)
+                kfree(res);
+        else
+                res_free(res);
+        return ret;
 }
 
 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
@@ -387,31 +456,285 @@ int vmw_context_check(struct vmw_private *dev_priv,
         return ret;
 }
 
+struct vmw_bpp {
+        uint8_t bpp;
+        uint8_t s_bpp;
+};
+
+/*
+ * Size table for the supported SVGA3D surface formats. It consists of
+ * two values. The bpp value and the s_bpp value which is short for
+ * "stride bits per pixel" The values are given in such a way that the
+ * minimum stride for the image is calculated using
+ *
+ * min_stride = w*s_bpp
+ *
+ * and the total memory requirement for the image is
+ *
+ * h*min_stride*bpp/s_bpp
+ *
+ */
+static const struct vmw_bpp vmw_sf_bpp[] = {
+        [SVGA3D_FORMAT_INVALID] = {0, 0},
+        [SVGA3D_X8R8G8B8] = {32, 32},
+        [SVGA3D_A8R8G8B8] = {32, 32},
+        [SVGA3D_R5G6B5] = {16, 16},
+        [SVGA3D_X1R5G5B5] = {16, 16},
+        [SVGA3D_A1R5G5B5] = {16, 16},
+        [SVGA3D_A4R4G4B4] = {16, 16},
+        [SVGA3D_Z_D32] = {32, 32},
+        [SVGA3D_Z_D16] = {16, 16},
+        [SVGA3D_Z_D24S8] = {32, 32},
+        [SVGA3D_Z_D15S1] = {16, 16},
+        [SVGA3D_LUMINANCE8] = {8, 8},
+        [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
+        [SVGA3D_LUMINANCE16] = {16, 16},
+        [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
+        [SVGA3D_DXT1] = {4, 16},
+        [SVGA3D_DXT2] = {8, 32},
+        [SVGA3D_DXT3] = {8, 32},
+        [SVGA3D_DXT4] = {8, 32},
+        [SVGA3D_DXT5] = {8, 32},
+        [SVGA3D_BUMPU8V8] = {16, 16},
+        [SVGA3D_BUMPL6V5U5] = {16, 16},
+        [SVGA3D_BUMPX8L8V8U8] = {32, 32},
+        [SVGA3D_ARGB_S10E5] = {16, 16},
+        [SVGA3D_ARGB_S23E8] = {32, 32},
+        [SVGA3D_A2R10G10B10] = {32, 32},
+        [SVGA3D_V8U8] = {16, 16},
+        [SVGA3D_Q8W8V8U8] = {32, 32},
+        [SVGA3D_CxV8U8] = {16, 16},
+        [SVGA3D_X8L8V8U8] = {32, 32},
+        [SVGA3D_A2W10V10U10] = {32, 32},
+        [SVGA3D_ALPHA8] = {8, 8},
+        [SVGA3D_R_S10E5] = {16, 16},
+        [SVGA3D_R_S23E8] = {32, 32},
+        [SVGA3D_RG_S10E5] = {16, 16},
+        [SVGA3D_RG_S23E8] = {32, 32},
+        [SVGA3D_BUFFER] = {8, 8},
+        [SVGA3D_Z_D24X8] = {32, 32},
+        [SVGA3D_V16U16] = {32, 32},
+        [SVGA3D_G16R16] = {32, 32},
+        [SVGA3D_A16B16G16R16] = {64, 64},
+        [SVGA3D_UYVY] = {12, 12},
+        [SVGA3D_YUY2] = {12, 12},
+        [SVGA3D_NV12] = {12, 8},
+        [SVGA3D_AYUV] = {32, 32},
+        [SVGA3D_BC4_UNORM] = {4, 16},
+        [SVGA3D_BC5_UNORM] = {8, 32},
+        [SVGA3D_Z_DF16] = {16, 16},
+        [SVGA3D_Z_DF24] = {24, 24},
+        [SVGA3D_Z_D24S8_INT] = {32, 32}
+};
+
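As a concrete illustration of the stride/memory formulas in the comment above (an editorial example using the DXT1 entry, where bpp = 4 and s_bpp = 16, working in bytes the way the driver does):

        /*
         * Example: one 128x128 DXT1 mip level.
         * min_stride = (128 * 16 + 7) >> 3 = 256 bytes
         * memory     = 128 * 256 * 4 / 16  = 8192 bytes
         * i.e. 32x32 blocks of 4x4 texels at 8 bytes per block.
         */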
 
 /**
  * Surface management.
  */
 
+struct vmw_surface_dma {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdSurfaceDMA body;
+        SVGA3dCopyBox cb;
+        SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+struct vmw_surface_define {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdDefineSurface body;
+};
+
+struct vmw_surface_destroy {
+        SVGA3dCmdHeader header;
+        SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+        return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+        return sizeof(struct vmw_surface_define) + srf->num_sizes *
+                sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+        return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+                                       void *cmd_space)
+{
+        struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+                cmd_space;
+
+        cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+                                      void *cmd_space)
+{
+        struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+                cmd_space;
+        struct drm_vmw_size *src_size;
+        SVGA3dSize *cmd_size;
+        uint32_t cmd_len;
+        int i;
+
+        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+        cmd->header.size = cmd_len;
+        cmd->body.sid = srf->res.id;
+        cmd->body.surfaceFlags = srf->flags;
+        cmd->body.format = cpu_to_le32(srf->format);
+        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+                cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+        cmd += 1;
+        cmd_size = (SVGA3dSize *) cmd;
+        src_size = srf->sizes;
+
+        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+                cmd_size->width = src_size->width;
+                cmd_size->height = src_size->height;
+                cmd_size->depth = src_size->depth;
+        }
+}
+
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+                                   void *cmd_space,
+                                   const SVGAGuestPtr *ptr,
+                                   bool to_surface)
+{
+        uint32_t i;
+        uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
+        uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+        struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+
+        for (i = 0; i < srf->num_sizes; ++i) {
+                SVGA3dCmdHeader *header = &cmd->header;
+                SVGA3dCmdSurfaceDMA *body = &cmd->body;
+                SVGA3dCopyBox *cb = &cmd->cb;
+                SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+                const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+                const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+                header->id = SVGA_3D_CMD_SURFACE_DMA;
+                header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+                body->guest.ptr = *ptr;
+                body->guest.ptr.offset += cur_offset->bo_offset;
+                body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
+                body->host.sid = srf->res.id;
+                body->host.face = cur_offset->face;
+                body->host.mipmap = cur_offset->mip;
+                body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+                                  SVGA3D_READ_HOST_VRAM);
+                cb->x = 0;
+                cb->y = 0;
+                cb->z = 0;
+                cb->srcx = 0;
+                cb->srcy = 0;
+                cb->srcz = 0;
+                cb->w = cur_size->width;
+                cb->h = cur_size->height;
+                cb->d = cur_size->depth;
+
+                suffix->suffixSize = sizeof(*suffix);
+                suffix->maximumOffset = body->guest.pitch*cur_size->height*
+                        cur_size->depth*bpp / stride_bpp;
+                suffix->flags.discard = 0;
+                suffix->flags.unsynchronized = 0;
+                suffix->flags.reserved = 0;
+                ++cmd;
+        }
+};
+
+
 static void vmw_hw_surface_destroy(struct vmw_resource *res)
 {
 
         struct vmw_private *dev_priv = res->dev_priv;
-        struct {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdDestroySurface body;
-        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+        struct vmw_surface *srf;
+        void *cmd;
 
-        if (unlikely(cmd == NULL)) {
-                DRM_ERROR("Failed reserving FIFO space for surface "
-                          "destruction.\n");
-                return;
-        }
+        if (res->id != -1) {
 
-        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
-        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-        cmd->body.sid = cpu_to_le32(res->id);
+                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+                if (unlikely(cmd == NULL)) {
+                        DRM_ERROR("Failed reserving FIFO space for surface "
+                                  "destruction.\n");
+                        return;
+                }
 
-        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+                vmw_surface_destroy_encode(res->id, cmd);
+                vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+                /*
+                 * used_memory_size_atomic, or separate lock
+                 * to avoid taking dev_priv::cmdbuf_mutex in
+                 * the destroy path.
+                 */
+
+                mutex_lock(&dev_priv->cmdbuf_mutex);
+                srf = container_of(res, struct vmw_surface, res);
+                dev_priv->used_memory_size -= srf->backup_size;
+                mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+        }
         vmw_3d_resource_dec(dev_priv, false);
 }
 
@@ -419,70 +742,352 @@ void vmw_surface_res_free(struct vmw_resource *res)
 {
         struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
 
+        if (srf->backup)
+                ttm_bo_unref(&srf->backup);
+        kfree(srf->offsets);
         kfree(srf->sizes);
         kfree(srf->snooper.image);
         kfree(srf);
 }
 
-int vmw_surface_init(struct vmw_private *dev_priv,
-                     struct vmw_surface *srf,
-                     void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_surface_do_validate - make a surface available to the device.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, allocate one, and optionally
+ * DMA the backed up surface contents to the device.
+ *
+ * Returns -EBUSY if there wasn't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+int vmw_surface_do_validate(struct vmw_private *dev_priv,
+                            struct vmw_surface *srf)
 {
-        int ret;
-        struct {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdDefineSurface body;
-        } *cmd;
-        SVGA3dSize *cmd_size;
         struct vmw_resource *res = &srf->res;
-        struct drm_vmw_size *src_size;
-        size_t submit_size;
-        uint32_t cmd_len;
-        int i;
+        struct list_head val_list;
+        struct ttm_validate_buffer val_buf;
+        uint32_t submit_size;
+        uint8_t *cmd;
+        int ret;
 
-        BUG_ON(res_free == NULL);
-        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
-                                VMW_RES_SURFACE, res_free);
+        if (likely(res->id != -1))
+                return 0;
+
+        if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
+                     dev_priv->memory_size))
+                return -EBUSY;
+
+        /*
+         * Reserve- and validate the backup DMA bo.
+         */
+
+        if (srf->backup) {
+                INIT_LIST_HEAD(&val_list);
+                val_buf.bo = ttm_bo_reference(srf->backup);
+                val_buf.new_sync_obj_arg = (void *)((unsigned long)
+                                                    DRM_VMW_FENCE_FLAG_EXEC);
+                list_add_tail(&val_buf.head, &val_list);
+                ret = ttm_eu_reserve_buffers(&val_list);
+                if (unlikely(ret != 0))
+                        goto out_no_reserve;
+
+                ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+                                      true, false, false);
+                if (unlikely(ret != 0))
+                        goto out_no_validate;
+        }
+
+        /*
+         * Alloc id for the resource.
+         */
 
+        ret = vmw_resource_alloc_id(dev_priv, res);
         if (unlikely(ret != 0)) {
-                res_free(res);
-                return ret;
+                DRM_ERROR("Failed to allocate a surface id.\n");
+                goto out_no_id;
+        }
+        if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+                ret = -EBUSY;
+                goto out_no_fifo;
         }
 
-        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
-        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+        /*
+         * Encode surface define- and dma commands.
+         */
+
+        submit_size = vmw_surface_define_size(srf);
+        if (srf->backup)
+                submit_size += vmw_surface_dma_size(srf);
 
         cmd = vmw_fifo_reserve(dev_priv, submit_size);
         if (unlikely(cmd == NULL)) {
-                DRM_ERROR("Fifo reserve failed for create surface.\n");
-                vmw_resource_unreference(&res);
-                return -ENOMEM;
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "validation.\n");
+                ret = -ENOMEM;
+                goto out_no_fifo;
         }
 
-        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
-        cmd->header.size = cpu_to_le32(cmd_len);
-        cmd->body.sid = cpu_to_le32(res->id);
-        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
-        cmd->body.format = cpu_to_le32(srf->format);
-        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
-                cmd->body.face[i].numMipLevels =
-                        cpu_to_le32(srf->mip_levels[i]);
+        vmw_surface_define_encode(srf, cmd);
+        if (srf->backup) {
+                SVGAGuestPtr ptr;
+
+                cmd += vmw_surface_define_size(srf);
+                vmw_bo_get_guest_ptr(srf->backup, &ptr);
+                vmw_surface_dma_encode(srf, cmd, &ptr, true);
         }
 
-        cmd += 1;
-        cmd_size = (SVGA3dSize *) cmd;
-        src_size = srf->sizes;
+        vmw_fifo_commit(dev_priv, submit_size);
 
-        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
-                cmd_size->width = cpu_to_le32(src_size->width);
-                cmd_size->height = cpu_to_le32(src_size->height);
-                cmd_size->depth = cpu_to_le32(src_size->depth);
+        /*
+         * Create a fence object and fence the backup buffer.
+         */
+
+        if (srf->backup) {
+                struct vmw_fence_obj *fence;
+
+                (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                                  &fence, NULL);
+                ttm_eu_fence_buffer_objects(&val_list, fence);
+                if (likely(fence != NULL))
+                        vmw_fence_obj_unreference(&fence);
+                ttm_bo_unref(&val_buf.bo);
+                ttm_bo_unref(&srf->backup);
         }
 
+        /*
+         * Surface memory usage accounting.
+         */
+
+        dev_priv->used_memory_size += srf->backup_size;
+
+        return 0;
+
+out_no_fifo:
+        vmw_resource_release_id(res);
+out_no_id:
+out_no_validate:
+        if (srf->backup)
+                ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+        if (srf->backup)
+                ttm_bo_unref(&val_buf.bo);
+        return ret;
+}
+
+/**
+ * vmw_surface_evict - Evict a hw surface.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * DMA the contents of a hw surface to a backup guest buffer object,
+ * and destroy the hw surface, releasing its id.
+ */
+int vmw_surface_evict(struct vmw_private *dev_priv,
+                      struct vmw_surface *srf)
+{
+        struct vmw_resource *res = &srf->res;
+        struct list_head val_list;
+        struct ttm_validate_buffer val_buf;
+        uint32_t submit_size;
+        uint8_t *cmd;
+        int ret;
+        struct vmw_fence_obj *fence;
+        SVGAGuestPtr ptr;
+
+        BUG_ON(res->id == -1);
+
+        /*
+         * Create a surface backup buffer object.
+         */
+
+        if (!srf->backup) {
+                ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
+                                    ttm_bo_type_device,
+                                    &vmw_srf_placement, 0, 0, true,
+                                    NULL, &srf->backup);
+                if (unlikely(ret != 0))
+                        return ret;
+        }
+
+        /*
+         * Reserve- and validate the backup DMA bo.
+         */
+
+        INIT_LIST_HEAD(&val_list);
+        val_buf.bo = ttm_bo_reference(srf->backup);
+        val_buf.new_sync_obj_arg = (void *)(unsigned long)
+                DRM_VMW_FENCE_FLAG_EXEC;
+        list_add_tail(&val_buf.head, &val_list);
+        ret = ttm_eu_reserve_buffers(&val_list);
+        if (unlikely(ret != 0))
+                goto out_no_reserve;
+
+        ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+                              true, false, false);
+        if (unlikely(ret != 0))
+                goto out_no_validate;
+
+
+        /*
+         * Encode the dma- and surface destroy commands.
+         */
+
+        submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
+        cmd = vmw_fifo_reserve(dev_priv, submit_size);
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "eviction.\n");
+                ret = -ENOMEM;
+                goto out_no_fifo;
+        }
+
+        vmw_bo_get_guest_ptr(srf->backup, &ptr);
+        vmw_surface_dma_encode(srf, cmd, &ptr, false);
+        cmd += vmw_surface_dma_size(srf);
+        vmw_surface_destroy_encode(res->id, cmd);
         vmw_fifo_commit(dev_priv, submit_size);
+
+        /*
+         * Surface memory usage accounting.
+         */
+
+        dev_priv->used_memory_size -= srf->backup_size;
+
+        /*
+         * Create a fence object and fence the DMA buffer.
+         */
+
+        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                          &fence, NULL);
+        ttm_eu_fence_buffer_objects(&val_list, fence);
+        if (likely(fence != NULL))
+                vmw_fence_obj_unreference(&fence);
+        ttm_bo_unref(&val_buf.bo);
+
+        /*
+         * Release the surface ID.
+         */
+
+        vmw_resource_release_id(res);
+
+        return 0;
+
+out_no_fifo:
+out_no_validate:
+        if (srf->backup)
+                ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+        ttm_bo_unref(&val_buf.bo);
+        ttm_bo_unref(&srf->backup);
+        return ret;
+}
+
+
+/**
+ * vmw_surface_validate - make a surface available to the device, evicting
+ * other surfaces if needed.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * Try to validate a surface and if it fails due to limited device resources,
+ * repeatedly try to evict other surfaces until the request can be
+ * accommodated.
+ *
+ * May return errors if out of resources.
+ */
+int vmw_surface_validate(struct vmw_private *dev_priv,
+                         struct vmw_surface *srf)
+{
+        int ret;
+        struct vmw_surface *evict_srf;
+
+        do {
+                write_lock(&dev_priv->resource_lock);
+                list_del_init(&srf->lru_head);
+                write_unlock(&dev_priv->resource_lock);
+
+                ret = vmw_surface_do_validate(dev_priv, srf);
+                if (likely(ret != -EBUSY))
+                        break;
+
+                write_lock(&dev_priv->resource_lock);
+                if (list_empty(&dev_priv->surface_lru)) {
+                        DRM_ERROR("Out of device memory for surfaces.\n");
+                        ret = -EBUSY;
+                        write_unlock(&dev_priv->resource_lock);
+                        break;
+                }
+
+                evict_srf = vmw_surface_reference
+                        (list_first_entry(&dev_priv->surface_lru,
+                                          struct vmw_surface,
+                                          lru_head));
+                list_del_init(&evict_srf->lru_head);
+
+                write_unlock(&dev_priv->resource_lock);
+                (void) vmw_surface_evict(dev_priv, evict_srf);
+
+                vmw_surface_unreference(&evict_srf);
+
+        } while (1);
+
+        if (unlikely(ret != 0 && srf->res.id != -1)) {
+                write_lock(&dev_priv->resource_lock);
+                list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
+                write_unlock(&dev_priv->resource_lock);
+        }
+
+        return ret;
+}
+
+
+/**
+ * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
+ *
+ * As part of the resource destruction, remove the surface from any
+ * lookup lists.
+ */
+static void vmw_surface_remove_from_lists(struct vmw_resource *res)
+{
+        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+        list_del_init(&srf->lru_head);
+}
+
+int vmw_surface_init(struct vmw_private *dev_priv,
+                     struct vmw_surface *srf,
+                     void (*res_free) (struct vmw_resource *res))
+{
+        int ret;
+        struct vmw_resource *res = &srf->res;
+
+        BUG_ON(res_free == NULL);
+        INIT_LIST_HEAD(&srf->lru_head);
+        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+                                VMW_RES_SURFACE, true, res_free,
+                                vmw_surface_remove_from_lists);
+
+        if (unlikely(ret != 0))
+                res_free(res);
+
+        /*
+         * The surface won't be visible to hardware until a
+         * surface validate.
+         */
+
         (void) vmw_3d_resource_inc(dev_priv, false);
         vmw_resource_activate(res, vmw_hw_surface_destroy);
-        return 0;
+        return ret;
 }
 
 static void vmw_user_surface_free(struct vmw_resource *res)
@@ -491,11 +1096,54 @@ static void vmw_user_surface_free(struct vmw_resource *res)
         struct vmw_user_surface *user_srf =
             container_of(srf, struct vmw_user_surface, srf);
 
+        if (srf->backup)
+                ttm_bo_unref(&srf->backup);
+        kfree(srf->offsets);
         kfree(srf->sizes);
         kfree(srf->snooper.image);
         kfree(user_srf);
 }
 
+/**
+ * vmw_resource_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list_head: list of resources to unreserve.
+ *
+ * Currently only surfaces are considered, and unreserving a surface
+ * means putting it back on the device's surface lru list,
+ * so that it can be evicted if necessary.
+ * This function traverses the resource list and
+ * checks whether resources are surfaces, and in that case puts them back
+ * on the device's surface LRU list.
+ */
+void vmw_resource_unreserve(struct list_head *list)
+{
+        struct vmw_resource *res;
+        struct vmw_surface *srf;
+        rwlock_t *lock = NULL;
+
+        list_for_each_entry(res, list, validate_head) {
+
+                if (res->res_free != &vmw_surface_res_free &&
+                    res->res_free != &vmw_user_surface_free)
+                        continue;
+
+                if (unlikely(lock == NULL)) {
+                        lock = &res->dev_priv->resource_lock;
+                        write_lock(lock);
+                }
+
+                srf = container_of(res, struct vmw_surface, res);
+                list_del_init(&srf->lru_head);
+                list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
+        }
+
+        if (lock != NULL)
+                write_unlock(lock);
+}
+
+
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle, struct vmw_surface **out)
@@ -572,7 +1220,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
         struct drm_vmw_size __user *user_sizes;
         int ret;
-        int i;
+        int i, j;
+        uint32_t cur_bo_offset;
+        struct drm_vmw_size *cur_size;
+        struct vmw_surface_offset *cur_offset;
+        uint32_t stride_bpp;
+        uint32_t bpp;
 
         if (unlikely(user_srf == NULL))
                 return -ENOMEM;
@@ -583,6 +1236,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         srf->flags = req->flags;
         srf->format = req->format;
         srf->scanout = req->scanout;
+        srf->backup = NULL;
+
         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
         srf->num_sizes = 0;
         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,6 +1254,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                 ret = -ENOMEM;
                 goto out_err0;
         }
+        srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+                               GFP_KERNEL);
+        if (unlikely(srf->offsets == NULL)) {
+                ret = -ENOMEM;
+                goto out_no_offsets;
+        }
 
         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
             req->size_addr;
@@ -610,6 +1271,29 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                 goto out_err1;
         }
 
+        cur_bo_offset = 0;
+        cur_offset = srf->offsets;
+        cur_size = srf->sizes;
+
+        bpp = vmw_sf_bpp[srf->format].bpp;
+        stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+
+        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+                for (j = 0; j < srf->mip_levels[i]; ++j) {
+                        uint32_t stride =
+                                (cur_size->width * stride_bpp + 7) >> 3;
+
+                        cur_offset->face = i;
+                        cur_offset->mip = j;
+                        cur_offset->bo_offset = cur_bo_offset;
+                        cur_bo_offset += stride * cur_size->height *
+                                cur_size->depth * bpp / stride_bpp;
+                        ++cur_offset;
+                        ++cur_size;
+                }
+        }
+        srf->backup_size = cur_bo_offset;
+
         if (srf->scanout &&
             srf->num_sizes == 1 &&
             srf->sizes[0].width == 64 &&
@@ -658,6 +1342,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         vmw_resource_unreference(&res);
         return 0;
 out_err1:
+        kfree(srf->offsets);
+out_no_offsets:
         kfree(srf->sizes);
 out_err0:
         kfree(user_srf);
@@ -974,7 +1660,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
         int ret;
 
         ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
-                                VMW_RES_STREAM, res_free);
+                                VMW_RES_STREAM, false, res_free, NULL);
 
         if (unlikely(ret != 0)) {
                 if (res_free == NULL)