author    Thomas Hellstrom <thellstrom@vmware.com>    2010-10-26 15:21:47 -0400
committer Dave Airlie <airlied@redhat.com>            2010-10-26 21:07:46 -0400
commit    135cba0dc399fdd47bd3ae305c1db75fcd77243f (patch)
tree      3eedcd7c5701dfe05246aca3479ab7396169f2e7 /drivers/gpu
parent    8f895da57da80b307efa2f94b5d4caf801e959a5 (diff)
vmwgfx: Implement a proper GMR eviction mechanism

Use Ben's new range manager hooks to implement a manager for GMRs that
manages ids rather than ranges. This means we can use the standard TTM
code for binding, unbinding and eviction.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
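A note on the mechanism: the new manager hands out GMR ids with the kernel's
IDA, using the classic ida_pre_get()/ida_get_new() retry loop of this era.
A minimal sketch of that pattern, with illustrative names (the real logic is
vmw_gmrid_man_get_node() in the new file below; unlike this sketch, it
returns 0 with no node when ids run out, so that TTM starts evicting):

	/* Illustrative only: preload outside the lock, allocate under it,
	 * retry on -EAGAIN, and back out ids beyond the hardware limit. */
	static int example_id_alloc(struct ida *ida, spinlock_t *lock,
				    uint32_t max_ids, int *p_id)
	{
		int ret;

		do {
			if (ida_pre_get(ida, GFP_KERNEL) == 0)
				return -ENOMEM;

			spin_lock(lock);
			ret = ida_get_new(ida, p_id);
			if (ret == 0 && *p_id >= max_ids) {
				ida_remove(ida, *p_id);
				spin_unlock(lock);
				return -ENOSPC;
			}
			spin_unlock(lock);
		} while (ret == -EAGAIN);

		return ret;
	}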
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c           81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c              16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h              29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c          29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c                5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c              38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c         75
9 files changed, 252 insertions(+), 160 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 4505e17df3f5..c9281a1b1d3b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_fence.o
+	    vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 1b3bd8c6c67e..80bc37b274e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
 
+static uint32_t vram_gmr_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
 
 struct vmw_ttm_backend {
 	struct ttm_backend backend;
+	struct page **pages;
+	unsigned long num_pages;
+	struct vmw_private *dev_priv;
+	int gmr_id;
 };
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
 			    struct page *dummy_read_page)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = pages;
+	vmw_be->num_pages = num_pages;
+
 	return 0;
 }
 
 static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
-	return 0;
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->gmr_id = bo_mem->start;
+
+	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+			    vmw_be->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
 	return 0;
 }
 
 static void vmw_ttm_clear(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = NULL;
+	vmw_be->num_pages = 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_backend *backend)
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
 		return NULL;
 
 	vmw_be->backend.func = &vmw_ttm_func;
+	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
 	return &vmw_be->backend;
 }
@@ -142,7 +185,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		/* System memory */
 
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
+		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
@@ -150,8 +193,20 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_WC;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+		/*
+		 * "Guest Memory Regions" is an aperture-like feature with
+		 * one slot per bo. There is an upper limit on the number
+		 * of slots as well as on the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -175,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-			    struct ttm_mem_reg *new_mem)
-{
-	if (new_mem->mem_type != TTM_PL_SYSTEM)
-		vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-	vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -201,7 +244,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
-		/* System memory */
+	case VMW_PL_GMR:
 		return 0;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -277,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify,
+	.move_notify = NULL,
+	.swap_notify = NULL,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f2942b3c59c0..d0ef624fbdcc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	idr_init(&dev_priv->context_idr);
 	idr_init(&dev_priv->surface_idr);
 	idr_init(&dev_priv->stream_idr);
-	ida_init(&dev_priv->gmr_ida);
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	atomic_set(&dev_priv->fence_queue_waiters, 0);
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->gmr_lru);
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err2;
 	}
 
+	dev_priv->has_gmr = true;
+	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+			   dev_priv->max_gmr_ids) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
 	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
 					   dev_priv->mmio_size, DRM_MTRR_WC);
 
@@ -440,13 +446,14 @@ out_err4:
 out_err3:
 	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
 		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_err2:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
-	ida_destroy(&dev_priv->gmr_ida);
 	idr_destroy(&dev_priv->surface_idr);
 	idr_destroy(&dev_priv->context_idr);
 	idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
 	iounmap(dev_priv->mmio_virt);
 	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
 		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
-	ida_destroy(&dev_priv->gmr_ida);
 	idr_destroy(&dev_priv->surface_idr);
 	idr_destroy(&dev_priv->context_idr);
 	idr_destroy(&dev_priv->stream_idr);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 9d55fa8cd0fe..e7a58d055041 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -49,6 +49,9 @@
 #define VMWGFX_MAX_GMRS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head validate_list;
-	struct list_head gmr_lru;
-	uint32_t gmr_id;
 	bool gmr_bound;
 	uint32_t cur_validate_node;
 	bool on_validate_list;
@@ -184,6 +185,7 @@ struct vmw_private {
 	uint32_t capabilities;
 	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
+	bool has_gmr;
 	struct mutex hw_mutex;
 
 	/*
@@ -266,14 +268,6 @@ struct vmw_private {
 	struct mutex cmdbuf_mutex;
 
 	/**
-	 * GMR management. Protected by the lru spinlock.
-	 */
-
-	struct ida gmr_ida;
-	struct list_head gmr_lru;
-
-
-	/**
 	 * Operating mode.
 	 */
 
@@ -334,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-			struct ttm_buffer_object *bo);
+			struct page *pages[],
+			unsigned long num_pages,
+			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
@@ -383,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 				  uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 				struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -442,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -544,6 +537,12 @@ int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
 /**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
  * Inline helper functions
  */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..51d9f9f1d7f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 		reloc = &sw_context->relocs[i];
 		validate = &sw_context->val_bufs[reloc->index];
 		bo = validate->bo;
-		reloc->location->offset += bo->offset;
-		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+		if (bo->mem.mem_type == TTM_PL_VRAM) {
+			reloc->location->offset += bo->offset;
+			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+		} else
+			reloc->location->gmrId = bo->mem.start;
 	}
 	vmw_free_relocations(sw_context);
 }
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	int ret;
 
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
 	/**
-	 * Put BO in VRAM, only if there is space.
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
-	if (unlikely(ret == -ERESTARTSYS))
-		return ret;
-
-	/**
-	 * Otherwise, set it up as GMR.
-	 */
-
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
-	ret = vmw_gmr_bind(dev_priv, bo);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
+	DRM_INFO("Falling through to VRAM.\n");
 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index e7304188a784..41d9a5b73c03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -612,7 +612,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 {
 	struct ttm_buffer_object *bo = &vmw_bo->base;
 	struct ttm_placement ne_placement = vmw_vram_ne_placement;
-	struct drm_mm_node *mm_node;
 	int ret = 0;
 
 	ne_placement.lpfn = bo->num_pages;
@@ -626,9 +625,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	mm_node = bo->mem.mm_node;
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    mm_node->start < bo->num_pages)
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0)
 		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
 				       false, false);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 5f8908a5d7fd..de0c5948521d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
  */
 
 static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-					unsigned long num_pages)
+					       unsigned long num_pages)
 {
 	unsigned long prev_pfn = ~(0UL);
 	unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
 }
 
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct ttm_buffer_object *bo)
+		 struct page *pages[],
+		 unsigned long num_pages,
+		 int gmr_id)
 {
-	struct ttm_tt *ttm = bo->ttm;
-	unsigned long descriptors;
-	int ret;
-	uint32_t id;
 	struct list_head desc_pages;
+	int ret;
 
-	if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
 		return -EINVAL;
 
-	ret = ttm_tt_populate(ttm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
-	if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+	if (vmw_gmr_count_descriptors(pages, num_pages) >
+	    dev_priv->max_gmr_descriptors)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&desc_pages);
-	ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
-					ttm->num_pages);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_gmr_id_alloc(dev_priv, &id);
+
+	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
 	if (unlikely(ret != 0))
-		goto out_no_id;
+		return ret;
 
-	vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
 	vmw_gmr_free_descriptors(&desc_pages);
-	vmw_dmabuf_set_gmr(bo, id);
-	return 0;
 
-out_no_id:
-	vmw_gmr_free_descriptors(&desc_pages);
-	return ret;
+	return 0;
 }
 
+
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
 	mutex_lock(&dev_priv->hw_mutex);
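The descriptor check in the new vmw_gmr_bind() leans on
vmw_gmr_count_descriptors(), whose body is elided from the hunk above. As a
hedged reconstruction of the idea from the visible locals (prev_pfn, pfn):
physically contiguous pages can share one SVGA GMR descriptor, so only a PFN
discontinuity costs a new one. A sketch, not the verbatim function (the
in-tree version may impose further per-descriptor limits):

	static unsigned long example_count_descriptors(struct page *pages[],
						       unsigned long num_pages)
	{
		unsigned long prev_pfn = ~(0UL);
		unsigned long descriptors = 0;
		unsigned long i;

		for (i = 0; i < num_pages; ++i) {
			unsigned long pfn = page_to_pfn(pages[i]);

			if (pfn != prev_pfn + 1)
				++descriptors;	/* a new contiguous run starts */
			prev_pfn = pfn;
		}

		return descriptors;
	}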
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 000000000000..ac6e0d1bd629
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+	spinlock_t lock;
+	struct ida gmr_ida;
+	uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+				  struct ttm_buffer_object *bo,
+				  struct ttm_placement *placement,
+				  struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+	int ret;
+	int id;
+
+	mem->mm_node = NULL;
+
+	do {
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+			return -ENOMEM;
+
+		spin_lock(&gman->lock);
+		ret = ida_get_new(&gman->gmr_ida, &id);
+
+		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+			ida_remove(&gman->gmr_ida, id);
+			spin_unlock(&gman->lock);
+			return 0;
+		}
+
+		spin_unlock(&gman->lock);
+
+	} while (ret == -EAGAIN);
+
+	if (likely(ret == 0)) {
+		mem->mm_node = gman;
+		mem->start = id;
+	}
+
+	return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+				   struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&gman->lock);
+		ida_remove(&gman->gmr_ida, mem->start);
+		spin_unlock(&gman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+			      unsigned long p_size)
+{
+	struct vmwgfx_gmrid_man *gman =
+		kzalloc(sizeof(*gman), GFP_KERNEL);
+
+	if (unlikely(gman == NULL))
+		return -ENOMEM;
+
+	spin_lock_init(&gman->lock);
+	ida_init(&gman->gmr_ida);
+	gman->max_gmr_ids = p_size;
+	man->priv = (void *) gman;
+	return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (gman) {
+		ida_destroy(&gman->gmr_ida);
+		kfree(gman);
+	}
+	return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+				const char *prefix)
+{
+	printk(KERN_INFO "%s: No debug info available for the GMR "
+	       "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+	vmw_gmrid_man_init,
+	vmw_gmrid_man_takedown,
+	vmw_gmrid_man_get_node,
+	vmw_gmrid_man_put_node,
+	vmw_gmrid_man_debug
+};
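A small aside on the function table that closes the new file: it uses
positional initializers. An equivalent with designated initializers, sketched
here under the assumption of the 2.6.37-era struct ttm_mem_type_manager_func
layout (init, takedown, get_node, put_node, debug), would stay correct even
if fields were ever added or reordered:

	const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
		.init = vmw_gmrid_man_init,
		.takedown = vmw_gmrid_man_takedown,
		.get_node = vmw_gmrid_man_get_node,
		.put_node = vmw_gmrid_man_put_node,
		.debug = vmw_gmrid_man_debug,
	};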
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8c40e9979db..36e129f0023f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
 	return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	struct ttm_bo_global *glob = bo->glob;
-	struct vmw_private *dev_priv =
-		container_of(bo->bdev, struct vmw_private, bdev);
-
-	if (vmw_bo->gmr_bound) {
-		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-		spin_unlock(&glob->lru_lock);
-		vmw_bo->gmr_bound = false;
-	}
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 
-	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_bo);
 }
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
 	INIT_LIST_HEAD(&vmw_bo->validate_list);
-	vmw_bo->gmr_id = 0;
-	vmw_bo->gmr_bound = false;
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 
-	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_user_bo);
 }
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
 	vmw_bo->on_validate_list = false;
 }
 
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo;
-
-	if (bo->mem.mem_type == TTM_PL_VRAM)
-		return SVGA_GMR_FRAMEBUFFER;
-
-	vmw_bo = vmw_dma_buffer(bo);
-
-	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	vmw_bo->gmr_bound = true;
-	vmw_bo->gmr_id = id;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 			   uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
-	struct ttm_bo_global *glob = dev_priv->bdev.glob;
-	int id;
-	int ret;
-
-	do {
-		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
-			return -ENOMEM;
-
-		spin_lock(&glob->lru_lock);
-		ret = ida_get_new(&dev_priv->gmr_ida, &id);
-		spin_unlock(&glob->lru_lock);
-	} while (ret == -EAGAIN);
-
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (unlikely(id >= dev_priv->max_gmr_ids)) {
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, id);
-		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
-	}
-
-	*p_id = (uint32_t) id;
-	return 0;
-}
-
 /*
  * Stream management
  */