path: root/drivers/gpu/drm/vmwgfx
author     Linus Torvalds <torvalds@linux-foundation.org>    2010-10-26 21:57:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-10-26 21:57:59 -0400
commit     c48c43e422c1404fd72c57d1d21a6f6d01e18900 (patch)
tree       48e5d3828b4f5479361986535f71a1ae44e4f3c1 /drivers/gpu/drm/vmwgfx
parent     520045db940a381d2bee1c1b2179f7921b40fb10 (diff)
parent     135cba0dc399fdd47bd3ae305c1db75fcd77243f (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the new
   simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c         |   84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c            |  130
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h            |   40
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c        |   29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c             |   13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c            |   38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  |  137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c          |    3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c            |  200
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c            |   28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c       |   75
12 files changed, 567 insertions(+), 212 deletions(-)
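The headline vmwgfx change in this merge is the GMR eviction rework: GMR ids are no longer tracked by a driver-private ida/LRU pair, but are handed out by a dedicated TTM memory type (VMW_PL_GMR), so TTM's normal eviction machinery can reclaim them. The fragment below is a condensed illustration of the resulting buffer-placement flow, pieced together from the vmwgfx_buffer.c and vmwgfx_execbuf.c hunks that follow; the function name is hypothetical and the snippet is not part of the patch.

/*
 * Condensed illustration only -- not code from this patch.  It sketches the
 * placement flow introduced here: try VRAM first, fall back to a GMR slot,
 * and read the GMR id straight out of bo->mem.start afterwards.
 */
static int example_bind_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       uint32_t *gmr_id_out)
{
	int ret;

	/* VRAM preferred, GMR as the busy fallback (vmw_vram_gmr_placement). */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (unlikely(ret != 0))
		return ret;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		*gmr_id_out = SVGA_GMR_FRAMEBUFFER; /* addressed by VRAM offset */
	else
		*gmr_id_out = bo->mem.start; /* id allocated by the GMR manager */

	return 0;
}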
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 4505e17df3f5..c9281a1b1d3b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_fence.o
+	    vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..80bc37b274e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
 
+static uint32_t vram_gmr_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
 
 struct vmw_ttm_backend {
 	struct ttm_backend backend;
+	struct page **pages;
+	unsigned long num_pages;
+	struct vmw_private *dev_priv;
+	int gmr_id;
 };
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
 			    struct page *dummy_read_page)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = pages;
+	vmw_be->num_pages = num_pages;
+
 	return 0;
 }
 
 static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
-	return 0;
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->gmr_id = bo_mem->start;
+
+	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+			    vmw_be->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
 	return 0;
 }
 
 static void vmw_ttm_clear(struct ttm_backend *backend)
 {
+	struct vmw_ttm_backend *vmw_be =
+	    container_of(backend, struct vmw_ttm_backend, backend);
+
+	vmw_be->pages = NULL;
+	vmw_be->num_pages = 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_backend *backend)
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
 		return NULL;
 
 	vmw_be->backend.func = &vmw_ttm_func;
+	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
 	return &vmw_be->backend;
 }
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		/* System memory */
 
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
+		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_WC;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+		/*
+		 * "Guest Memory Regions" is an aperture like feature with
+		 * one slot per bo. There is an upper limit of the number of
+		 * slots as well as the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-			    struct ttm_mem_reg *new_mem)
-{
-	if (new_mem->mem_type != TTM_PL_SYSTEM)
-		vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-	vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
-		/* System memory */
+	case VMW_PL_GMR:
 		return 0;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = dev_priv->vram_start;
 		mem->bus.is_iomem = true;
 		break;
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify,
+	.move_notify = NULL,
+	.swap_notify = NULL,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2ef93df9e8ae..10ca97ee0206 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	idr_init(&dev_priv->context_idr);
 	idr_init(&dev_priv->surface_idr);
 	idr_init(&dev_priv->stream_idr);
-	ida_init(&dev_priv->gmr_ida);
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	atomic_set(&dev_priv->fence_queue_waiters, 0);
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->gmr_lru);
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err2;
 	}
 
+	dev_priv->has_gmr = true;
+	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+	    dev_priv->max_gmr_ids) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
 	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
 					   dev_priv->mmio_size, DRM_MTRR_WC);
 
@@ -440,13 +446,14 @@ out_err4:
 out_err3:
 	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
 		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_err2:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
-	ida_destroy(&dev_priv->gmr_ida);
 	idr_destroy(&dev_priv->surface_idr);
 	idr_destroy(&dev_priv->context_idr);
 	idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
 	iounmap(dev_priv->mmio_virt);
 	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
 		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
-	ida_destroy(&dev_priv->gmr_ida);
 	idr_destroy(&dev_priv->surface_idr);
 	idr_destroy(&dev_priv->context_idr);
 	idr_destroy(&dev_priv->stream_idr);
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev)
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
+	INIT_LIST_HEAD(&vmaster->fb_surf);
+	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev,
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
 
-	ttm_lock_init(&vmaster->lock);
+	vmw_master_init(vmaster);
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 	master->driver_priv = vmaster;
 
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	vmw_kms_idle_workqueues(vmaster);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * Buffer contents is moved to swappable memory.
 		 */
 		ttm_bo_swapout_all(&dev_priv->bdev);
+
 		break;
 	case PM_POST_HIBERNATION:
 	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
 		ttm_suspend_unlock(&vmaster->lock);
+
 		break;
 	case PM_RESTORE_PREPARE:
 		break;
-	case PM_POST_RESTORE:
-		break;
 	default:
 		break;
 	}
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
  * These might not be needed with the virtual SVGA device.
  */
 
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	if (dev_priv->num_3d_resources != 0) {
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+		return -EBUSY;
+	}
+
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;
 }
 
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
 {
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	return pci_enable_device(pdev);
 }
 
+static int vmw_pm_suspend(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct pm_message dummy;
+
+	dummy.event = 0;
+
+	return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+
+	return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	/**
+	 * Release 3d reference held by fbdev and potentially
+	 * stop fifo.
+	 */
+	dev_priv->suspended = true;
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_dec(dev_priv);
+
+	if (dev_priv->num_3d_resources != 0) {
+
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+
+		if (dev_priv->enable_fb)
+			vmw_3d_resource_inc(dev_priv);
+		dev_priv->suspended = false;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	/**
+	 * Reclaim 3d reference held by fbdev and potentially
+	 * start fifo.
+	 */
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_inc(dev_priv);
+
+	dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+	.prepare = vmw_pm_prepare,
+	.complete = vmw_pm_complete,
+	.suspend = vmw_pm_suspend,
+	.resume = vmw_pm_resume,
+};
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	    DRIVER_MODESET,
@@ -798,8 +887,6 @@ static struct drm_driver driver = {
 	.irq_handler = vmw_irq_handler,
 	.get_vblank_counter = vmw_get_vblank_counter,
 	.reclaim_buffers_locked = NULL,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
 	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
@@ -821,15 +908,16 @@ static struct drm_driver driver = {
 		.compat_ioctl = drm_compat_ioctl,
 #endif
 		.llseek = noop_llseek,
 	},
 	.pci_driver = {
 		.name = VMWGFX_DRIVER_NAME,
 		.id_table = vmw_pci_id_list,
 		.probe = vmw_probe,
 		.remove = vmw_remove,
-		.suspend = vmw_pci_suspend,
-		.resume = vmw_pci_resume
-	},
+		.driver = {
+			.pm = &vmw_pm_ops
+		}
+	},
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
 	.date = VMWGFX_DRIVER_DATE,
@@ -863,3 +951,7 @@ module_exit(vmwgfx_exit);
 MODULE_AUTHOR("VMware Inc. and others");
 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
 MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+	       __stringify(VMWGFX_DRIVER_MINOR) "."
+	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+	       "0");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58de6393f611..e7a58d055041 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,9 +39,9 @@
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_DATE "20100927"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -49,6 +49,9 @@
 #define VMWGFX_MAX_GMRS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head validate_list;
-	struct list_head gmr_lru;
-	uint32_t gmr_id;
 	bool gmr_bound;
 	uint32_t cur_validate_node;
 	bool on_validate_list;
@@ -151,6 +152,8 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
+	struct mutex fb_surf_mutex;
+	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -182,6 +185,7 @@ struct vmw_private {
 	uint32_t capabilities;
 	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
+	bool has_gmr;
 	struct mutex hw_mutex;
 
 	/*
@@ -264,14 +268,6 @@ struct vmw_private {
 	struct mutex cmdbuf_mutex;
 
 	/**
-	 * GMR management. Protected by the lru spinlock.
-	 */
-
-	struct ida gmr_ida;
-	struct list_head gmr_lru;
-
-
-	/**
 	 * Operating mode.
 	 */
 
@@ -286,6 +282,7 @@ struct vmw_private {
 	struct vmw_master *active_master;
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
+	bool suspended;
 
 	struct mutex release_mutex;
 	uint32_t num_3d_resources;
@@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-			struct ttm_buffer_object *bo);
+			struct page *pages[],
+			unsigned long num_pages,
+			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
@@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 				  uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 				struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
@@ -537,6 +537,12 @@ int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
 /**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
  * Inline helper functions
  */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..51d9f9f1d7f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 		reloc = &sw_context->relocs[i];
 		validate = &sw_context->val_bufs[reloc->index];
 		bo = validate->bo;
-		reloc->location->offset += bo->offset;
-		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+		if (bo->mem.mem_type == TTM_PL_VRAM) {
+			reloc->location->offset += bo->offset;
+			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+		} else
+			reloc->location->gmrId = bo->mem.start;
 	}
 	vmw_free_relocations(sw_context);
 }
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	int ret;
 
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
 	/**
-	 * Put BO in VRAM, only if there is space.
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
-	if (unlikely(ret == -ERESTARTSYS))
-		return ret;
-
-	/**
-	 * Otherwise, set it up as GMR.
-	 */
-
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
-	ret = vmw_gmr_bind(dev_priv, bo);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
+	DRM_INFO("Falling through to VRAM.\n");
 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 409e172f4abf..41d9a5b73c03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
+					info->fix.line_length,
+					var->yoffset + var->yres)) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 
+	if (vmw_priv->suspended)
+		return;
+
 	spin_lock_irqsave(&par->dirty.lock, flags);
 	if (!par->dirty.active) {
 		spin_unlock_irqrestore(&par->dirty.lock, flags);
@@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 		goto err_unlock;
 
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    bo->mem.mm_node->start < bo->num_pages)
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0)
 		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
 				       false, false);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 5f8908a5d7fd..de0c5948521d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
  */
 
 static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-					unsigned long num_pages)
+					       unsigned long num_pages)
 {
 	unsigned long prev_pfn = ~(0UL);
 	unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
 }
 
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct ttm_buffer_object *bo)
+		 struct page *pages[],
+		 unsigned long num_pages,
+		 int gmr_id)
 {
-	struct ttm_tt *ttm = bo->ttm;
-	unsigned long descriptors;
-	int ret;
-	uint32_t id;
 	struct list_head desc_pages;
+	int ret;
 
-	if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
 		return -EINVAL;
 
-	ret = ttm_tt_populate(ttm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
-	if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+	if (vmw_gmr_count_descriptors(pages, num_pages) >
+	    dev_priv->max_gmr_descriptors)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&desc_pages);
-	ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
-					ttm->num_pages);
-	if (unlikely(ret != 0))
-		return ret;
 
-	ret = vmw_gmr_id_alloc(dev_priv, &id);
+	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
 	if (unlikely(ret != 0))
-		goto out_no_id;
+		return ret;
 
-	vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
 	vmw_gmr_free_descriptors(&desc_pages);
-	vmw_dmabuf_set_gmr(bo, id);
-	return 0;
 
-out_no_id:
-	vmw_gmr_free_descriptors(&desc_pages);
-	return ret;
+	return 0;
 }
 
+
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
 	mutex_lock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 000000000000..ac6e0d1bd629
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+	spinlock_t lock;
+	struct ida gmr_ida;
+	uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+				  struct ttm_buffer_object *bo,
+				  struct ttm_placement *placement,
+				  struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+	int ret;
+	int id;
+
+	mem->mm_node = NULL;
+
+	do {
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+			return -ENOMEM;
+
+		spin_lock(&gman->lock);
+		ret = ida_get_new(&gman->gmr_ida, &id);
+
+		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+			ida_remove(&gman->gmr_ida, id);
+			spin_unlock(&gman->lock);
+			return 0;
+		}
+
+		spin_unlock(&gman->lock);
+
+	} while (ret == -EAGAIN);
+
+	if (likely(ret == 0)) {
+		mem->mm_node = gman;
+		mem->start = id;
+	}
+
+	return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+				   struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&gman->lock);
+		ida_remove(&gman->gmr_ida, mem->start);
+		spin_unlock(&gman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+			      unsigned long p_size)
+{
+	struct vmwgfx_gmrid_man *gman =
+		kzalloc(sizeof(*gman), GFP_KERNEL);
+
+	if (unlikely(gman == NULL))
+		return -ENOMEM;
+
+	spin_lock_init(&gman->lock);
+	ida_init(&gman->gmr_ida);
+	gman->max_gmr_ids = p_size;
+	man->priv = (void *) gman;
+	return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (gman) {
+		ida_destroy(&gman->gmr_ida);
+		kfree(gman);
+	}
+	return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+				const char *prefix)
+{
+	printk(KERN_INFO "%s: No debug info available for the GMR "
+	       "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+	vmw_gmrid_man_init,
+	vmw_gmrid_man_takedown,
+	vmw_gmrid_man_get_node,
+	vmw_gmrid_man_put_node,
+	vmw_gmrid_man_debug
+};
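To connect this new file to the rest of the series: the manager above backs the VMW_PL_GMR memory type, and TTM calls its get_node/put_node hooks whenever a buffer is validated into or out of a GMR. The snippet below is a hypothetical usage sketch assembled from the vmwgfx_drv.c and vmwgfx_buffer.c hunks earlier in this diff; it is not code from the patch, and the function name is invented for illustration.

/* Hypothetical sketch of how the GMR id manager is wired up (see hunks above). */
static int example_setup_gmr_pool(struct vmw_private *dev_priv)
{
	/* vmw_init_mem_type() points the VMW_PL_GMR type at the id manager:   */
	/*	man->func = &vmw_gmrid_manager_func;                            */

	/* vmw_driver_load() then creates the pool, sized by the device limit. */
	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0)
		dev_priv->has_gmr = false;	/* no GMR pool; VRAM only */

	/*
	 * From here on, validating a bo with vmw_vram_gmr_placement makes TTM
	 * call vmw_gmrid_man_get_node() to pick a free id (stored in
	 * mem->start); vmw_ttm_bind() turns that id into an actual GMR via
	 * vmw_gmr_bind(), and eviction releases the id through
	 * vmw_gmrid_man_put_node().
	 */
	return 0;
}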
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 1c7a316454d8..570d57775a58 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	case DRM_VMW_PARAM_FIFO_CAPS:
 		param->value = dev_priv->fifo.capabilities;
 		break;
+	case DRM_VMW_PARAM_MAX_FB_SIZE:
+		param->value = dev_priv->vram_size;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e882ba099f0c..87c6e6156d7d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
332 struct delayed_work d_work; 332 struct delayed_work d_work;
333 struct mutex work_lock; 333 struct mutex work_lock;
334 bool present_fs; 334 bool present_fs;
335 struct list_head head;
336 struct drm_master *master;
335}; 337};
336 338
339/**
340 * vmw_kms_idle_workqueues - Flush workqueues on this master
341 *
342 * @vmaster - Pointer identifying the master, for the surfaces of which
343 * we idle the dirty work queues.
344 *
345 * This function should be called with the ttm lock held in exclusive mode
346 * to idle all dirty work queues before the fifo is taken down.
347 *
348 * The work task may actually requeue itself, but after the flush returns we're
349 * sure that there's nothing to present, since the ttm lock is held in
350 * exclusive mode, so the fifo will never get used.
351 */
352
353void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
354{
355 struct vmw_framebuffer_surface *entry;
356
357 mutex_lock(&vmaster->fb_surf_mutex);
358 list_for_each_entry(entry, &vmaster->fb_surf, head) {
359 if (cancel_delayed_work_sync(&entry->d_work))
360 (void) entry->d_work.work.func(&entry->d_work.work);
361
362 (void) cancel_delayed_work_sync(&entry->d_work);
363 }
364 mutex_unlock(&vmaster->fb_surf_mutex);
365}
366
337void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 367void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
338{ 368{
339 struct vmw_framebuffer_surface *vfb = 369 struct vmw_framebuffer_surface *vfbs =
340 vmw_framebuffer_to_vfbs(framebuffer); 370 vmw_framebuffer_to_vfbs(framebuffer);
371 struct vmw_master *vmaster = vmw_master(vfbs->master);
372
341 373
342 cancel_delayed_work_sync(&vfb->d_work); 374 mutex_lock(&vmaster->fb_surf_mutex);
375 list_del(&vfbs->head);
376 mutex_unlock(&vmaster->fb_surf_mutex);
377
378 cancel_delayed_work_sync(&vfbs->d_work);
379 drm_master_put(&vfbs->master);
343 drm_framebuffer_cleanup(framebuffer); 380 drm_framebuffer_cleanup(framebuffer);
344 vmw_surface_unreference(&vfb->surface); 381 vmw_surface_unreference(&vfbs->surface);
345 382
346 kfree(framebuffer); 383 kfree(vfbs);
347} 384}
348 385
349static void vmw_framebuffer_present_fs_callback(struct work_struct *work) 386static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
362 SVGA3dCopyRect cr; 399 SVGA3dCopyRect cr;
363 } *cmd; 400 } *cmd;
364 401
402 /**
403 * Strictly we should take the ttm_lock in read mode before accessing
404 * the fifo, to make sure the fifo is present and up. However,
405 * instead we flush all workqueues under the ttm lock in exclusive mode
406 * before taking down the fifo.
407 */
365 mutex_lock(&vfbs->work_lock); 408 mutex_lock(&vfbs->work_lock);
366 if (!vfbs->present_fs) 409 if (!vfbs->present_fs)
367 goto out_unlock; 410 goto out_unlock;
@@ -392,17 +435,20 @@ out_unlock:
392 435
393 436
394int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 437int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
438 struct drm_file *file_priv,
395 unsigned flags, unsigned color, 439 unsigned flags, unsigned color,
396 struct drm_clip_rect *clips, 440 struct drm_clip_rect *clips,
397 unsigned num_clips) 441 unsigned num_clips)
398{ 442{
399 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 443 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
444 struct vmw_master *vmaster = vmw_master(file_priv->master);
400 struct vmw_framebuffer_surface *vfbs = 445 struct vmw_framebuffer_surface *vfbs =
401 vmw_framebuffer_to_vfbs(framebuffer); 446 vmw_framebuffer_to_vfbs(framebuffer);
402 struct vmw_surface *surf = vfbs->surface; 447 struct vmw_surface *surf = vfbs->surface;
403 struct drm_clip_rect norect; 448 struct drm_clip_rect norect;
404 SVGA3dCopyRect *cr; 449 SVGA3dCopyRect *cr;
405 int i, inc = 1; 450 int i, inc = 1;
451 int ret;
406 452
407 struct { 453 struct {
408 SVGA3dCmdHeader header; 454 SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
410 SVGA3dCopyRect cr; 456 SVGA3dCopyRect cr;
411 } *cmd; 457 } *cmd;
412 458
459 if (unlikely(vfbs->master != file_priv->master))
460 return -EINVAL;
461
462 ret = ttm_read_lock(&vmaster->lock, true);
463 if (unlikely(ret != 0))
464 return ret;
465
413 if (!num_clips || 466 if (!num_clips ||
414 !(dev_priv->fifo.capabilities & 467 !(dev_priv->fifo.capabilities &
415 SVGA_FIFO_CAP_SCREEN_OBJECT)) { 468 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
425 */ 478 */
426 vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); 479 vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
427 } 480 }
481 ttm_read_unlock(&vmaster->lock);
428 return 0; 482 return 0;
429 } 483 }
430 484
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
442 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); 496 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
443 if (unlikely(cmd == NULL)) { 497 if (unlikely(cmd == NULL)) {
444 DRM_ERROR("Fifo reserve failed.\n"); 498 DRM_ERROR("Fifo reserve failed.\n");
499 ttm_read_unlock(&vmaster->lock);
445 return -ENOMEM; 500 return -ENOMEM;
446 } 501 }
447 502
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
461 } 516 }
462 517
463 vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); 518 vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
464 519 ttm_read_unlock(&vmaster->lock);
465 return 0; 520 return 0;
466} 521}
467 522
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
471 .create_handle = vmw_framebuffer_create_handle, 526 .create_handle = vmw_framebuffer_create_handle,
472}; 527};
473 528
474int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 529static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
475 struct vmw_surface *surface, 530 struct drm_file *file_priv,
476 struct vmw_framebuffer **out, 531 struct vmw_surface *surface,
477 unsigned width, unsigned height) 532 struct vmw_framebuffer **out,
533 const struct drm_mode_fb_cmd
534 *mode_cmd)
478 535
479{ 536{
480 struct drm_device *dev = dev_priv->dev; 537 struct drm_device *dev = dev_priv->dev;
481 struct vmw_framebuffer_surface *vfbs; 538 struct vmw_framebuffer_surface *vfbs;
539 enum SVGA3dSurfaceFormat format;
540 struct vmw_master *vmaster = vmw_master(file_priv->master);
482 int ret; 541 int ret;
483 542
543 /*
544 * Sanity checks.
545 */
546
547 if (unlikely(surface->mip_levels[0] != 1 ||
548 surface->num_sizes != 1 ||
549 surface->sizes[0].width < mode_cmd->width ||
550 surface->sizes[0].height < mode_cmd->height ||
551 surface->sizes[0].depth != 1)) {
552 DRM_ERROR("Incompatible surface dimensions "
553 "for requested mode.\n");
554 return -EINVAL;
555 }
556
557 switch (mode_cmd->depth) {
558 case 32:
559 format = SVGA3D_A8R8G8B8;
560 break;
561 case 24:
562 format = SVGA3D_X8R8G8B8;
563 break;
564 case 16:
565 format = SVGA3D_R5G6B5;
566 break;
567 case 15:
568 format = SVGA3D_A1R5G5B5;
569 break;
570 default:
571 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
572 return -EINVAL;
573 }
574
575 if (unlikely(format != surface->format)) {
576 DRM_ERROR("Invalid surface format for requested mode.\n");
577 return -EINVAL;
578 }
579
484 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); 580 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
485 if (!vfbs) { 581 if (!vfbs) {
486 ret = -ENOMEM; 582 ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
498 } 594 }
499 595
500 /* XXX get the first 3 from the surface info */ 596 /* XXX get the first 3 from the surface info */
501 vfbs->base.base.bits_per_pixel = 32; 597 vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
502 vfbs->base.base.pitch = width * 32 / 4; 598 vfbs->base.base.pitch = mode_cmd->pitch;
503 vfbs->base.base.depth = 24; 599 vfbs->base.base.depth = mode_cmd->depth;
504 vfbs->base.base.width = width; 600 vfbs->base.base.width = mode_cmd->width;
505 vfbs->base.base.height = height; 601 vfbs->base.base.height = mode_cmd->height;
506 vfbs->base.pin = &vmw_surface_dmabuf_pin; 602 vfbs->base.pin = &vmw_surface_dmabuf_pin;
507 vfbs->base.unpin = &vmw_surface_dmabuf_unpin; 603 vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
508 vfbs->surface = surface; 604 vfbs->surface = surface;
605 vfbs->master = drm_master_get(file_priv->master);
509 mutex_init(&vfbs->work_lock); 606 mutex_init(&vfbs->work_lock);
607
608 mutex_lock(&vmaster->fb_surf_mutex);
510 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); 609 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
610 list_add_tail(&vfbs->head, &vmaster->fb_surf);
611 mutex_unlock(&vmaster->fb_surf_mutex);
612
511 *out = &vfbs->base; 613 *out = &vfbs->base;
512 614
513 return 0; 615 return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
544} 646}
545 647
546int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 648int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
649 struct drm_file *file_priv,
547 unsigned flags, unsigned color, 650 unsigned flags, unsigned color,
548 struct drm_clip_rect *clips, 651 struct drm_clip_rect *clips,
549 unsigned num_clips) 652 unsigned num_clips)
550{ 653{
551 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 654 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
655 struct vmw_master *vmaster = vmw_master(file_priv->master);
552 struct drm_clip_rect norect; 656 struct drm_clip_rect norect;
657 int ret;
553 struct { 658 struct {
554 uint32_t header; 659 uint32_t header;
555 SVGAFifoCmdUpdate body; 660 SVGAFifoCmdUpdate body;
556 } *cmd; 661 } *cmd;
557 int i, increment = 1; 662 int i, increment = 1;
558 663
664 ret = ttm_read_lock(&vmaster->lock, true);
665 if (unlikely(ret != 0))
666 return ret;
667
559 if (!num_clips) { 668 if (!num_clips) {
560 num_clips = 1; 669 num_clips = 1;
561 clips = &norect; 670 clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
570 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); 679 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
571 if (unlikely(cmd == NULL)) { 680 if (unlikely(cmd == NULL)) {
572 DRM_ERROR("Fifo reserve failed.\n"); 681 DRM_ERROR("Fifo reserve failed.\n");
682 ttm_read_unlock(&vmaster->lock);
573 return -ENOMEM; 683 return -ENOMEM;
574 } 684 }
575 685
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
582 } 692 }
583 693
584 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); 694 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
695 ttm_read_unlock(&vmaster->lock);
585 696
586 return 0; 697 return 0;
587} 698}
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
659 return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); 770 return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
660} 771}
661 772
662int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, 773static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
663 struct vmw_dma_buffer *dmabuf, 774 struct vmw_dma_buffer *dmabuf,
664 struct vmw_framebuffer **out, 775 struct vmw_framebuffer **out,
665 unsigned width, unsigned height) 776 const struct drm_mode_fb_cmd
777 *mode_cmd)
666 778
667{ 779{
668 struct drm_device *dev = dev_priv->dev; 780 struct drm_device *dev = dev_priv->dev;
669 struct vmw_framebuffer_dmabuf *vfbd; 781 struct vmw_framebuffer_dmabuf *vfbd;
782 unsigned int requested_size;
670 int ret; 783 int ret;
671 784
785 requested_size = mode_cmd->height * mode_cmd->pitch;
786 if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
787 DRM_ERROR("Screen buffer object size is too small "
788 "for requested mode.\n");
789 return -EINVAL;
790 }
791
672 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); 792 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
673 if (!vfbd) { 793 if (!vfbd) {
674 ret = -ENOMEM; 794 ret = -ENOMEM;
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
685 goto out_err3; 805 goto out_err3;
686 } 806 }
687 807
688 /* XXX get the first 3 from the surface info */ 808 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
689 vfbd->base.base.bits_per_pixel = 32; 809 vfbd->base.base.pitch = mode_cmd->pitch;
690 vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; 810 vfbd->base.base.depth = mode_cmd->depth;
691 vfbd->base.base.depth = 24; 811 vfbd->base.base.width = mode_cmd->width;
692 vfbd->base.base.width = width; 812 vfbd->base.base.height = mode_cmd->height;
693 vfbd->base.base.height = height;
694 vfbd->base.pin = vmw_framebuffer_dmabuf_pin; 813 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
695 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; 814 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
696 vfbd->buffer = dmabuf; 815 vfbd->buffer = dmabuf;
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
719 struct vmw_framebuffer *vfb = NULL; 838 struct vmw_framebuffer *vfb = NULL;
720 struct vmw_surface *surface = NULL; 839 struct vmw_surface *surface = NULL;
721 struct vmw_dma_buffer *bo = NULL; 840 struct vmw_dma_buffer *bo = NULL;
841 u64 required_size;
722 int ret; 842 int ret;
723 843
844 /**
845 * This code should be conditioned on Screen Objects not being used.
846 * If screen objects are used, we can allocate a GMR to hold the
847 * requested framebuffer.
848 */
849
850 required_size = mode_cmd->pitch * mode_cmd->height;
851 if (unlikely(required_size > (u64) dev_priv->vram_size)) {
852 DRM_ERROR("VRAM size is too small for requested mode.\n");
853 return NULL;
854 }
855
856 /**
857 * End conditioned code.
858 */
859
724 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 860 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
725 mode_cmd->handle, &surface); 861 mode_cmd->handle, &surface);
726 if (ret) 862 if (ret)
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
729 if (!surface->scanout) 865 if (!surface->scanout)
730 goto err_not_scanout; 866 goto err_not_scanout;
731 867
732 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, 868 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
733 mode_cmd->width, mode_cmd->height); 869 &vfb, mode_cmd);
734 870
735 /* vmw_user_surface_lookup takes one ref so does new_fb */ 871 /* vmw_user_surface_lookup takes one ref so does new_fb */
736 vmw_surface_unreference(&surface); 872 vmw_surface_unreference(&surface);
@@ -751,7 +887,7 @@ try_dmabuf:
751 } 887 }
752 888
753 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, 889 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
754 mode_cmd->width, mode_cmd->height); 890 mode_cmd);
755 891
756 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ 892 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
757 vmw_dmabuf_unreference(&bo); 893 vmw_dmabuf_unreference(&bo);
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
889 vmw_priv->num_displays = vmw_read(vmw_priv, 1025 vmw_priv->num_displays = vmw_read(vmw_priv,
890 SVGA_REG_NUM_GUEST_DISPLAYS); 1026 SVGA_REG_NUM_GUEST_DISPLAYS);
891 1027
1028 if (vmw_priv->num_displays == 0)
1029 vmw_priv->num_displays = 1;
1030
892 for (i = 0; i < vmw_priv->num_displays; ++i) { 1031 for (i = 0; i < vmw_priv->num_displays; ++i) {
893 save = &vmw_priv->vga_save[i]; 1032 save = &vmw_priv->vga_save[i];
894 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); 1033 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1136,13 @@ out_unlock:
997 return ret; 1136 return ret;
998} 1137}
999 1138
1139bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1140 uint32_t pitch,
1141 uint32_t height)
1142{
1143 return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
1144}
1145
1000u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) 1146u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1001{ 1147{
1002 return 0; 1148 return 0;
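Note on the vmwgfx_kms.c hunks above: the same guard now appears in two places. vmw_kms_fb_create() rejects a framebuffer whose pitch * height exceeds dev_priv->vram_size, and vmw_kms_validate_mode_vram() exposes that test as a reusable helper. Below is a minimal standalone sketch of the check, not taken from the driver (the function name and plain integer types are illustrative); the point it demonstrates is the promotion to 64 bits before multiplying, so a large user-supplied pitch and height cannot wrap a 32-bit product and sneak past the comparison.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for vmw_kms_validate_mode_vram(): returns true when
 * a scanout buffer of the given pitch (bytes per line) and height (lines)
 * fits in the advertised VRAM size. */
static bool mode_fits_in_vram(uint64_t vram_size, uint32_t pitch, uint32_t height)
{
	/* Promote before multiplying; pitch * height can overflow 32 bits. */
	uint64_t required = (uint64_t)pitch * (uint64_t)height;

	return required < vram_size;
}

As in the hunk, the comparison is strict (required must be below vram_size), which mirrors the driver's behaviour of refusing a mode that would consume every last byte of VRAM.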
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 11cb39e3accb..a01c47ddb5bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
427{ 427{
428 struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); 428 struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
429 struct drm_device *dev = connector->dev; 429 struct drm_device *dev = connector->dev;
430 struct vmw_private *dev_priv = vmw_priv(dev);
430 struct drm_display_mode *mode = NULL; 431 struct drm_display_mode *mode = NULL;
432 struct drm_display_mode *bmode;
431 struct drm_display_mode prefmode = { DRM_MODE("preferred", 433 struct drm_display_mode prefmode = { DRM_MODE("preferred",
432 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 434 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
433 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
443 mode->hdisplay = ldu->pref_width; 445 mode->hdisplay = ldu->pref_width;
444 mode->vdisplay = ldu->pref_height; 446 mode->vdisplay = ldu->pref_height;
445 mode->vrefresh = drm_mode_vrefresh(mode); 447 mode->vrefresh = drm_mode_vrefresh(mode);
446 drm_mode_probed_add(connector, mode); 448 if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
449 mode->vdisplay)) {
450 drm_mode_probed_add(connector, mode);
447 451
448 if (ldu->pref_mode) { 452 if (ldu->pref_mode) {
449 list_del_init(&ldu->pref_mode->head); 453 list_del_init(&ldu->pref_mode->head);
450 drm_mode_destroy(dev, ldu->pref_mode); 454 drm_mode_destroy(dev, ldu->pref_mode);
451 } 455 }
452 456
453 ldu->pref_mode = mode; 457 ldu->pref_mode = mode;
458 }
454 } 459 }
455 460
456 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { 461 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
457 if (vmw_ldu_connector_builtin[i].hdisplay > max_width || 462 bmode = &vmw_ldu_connector_builtin[i];
458 vmw_ldu_connector_builtin[i].vdisplay > max_height) 463 if (bmode->hdisplay > max_width ||
464 bmode->vdisplay > max_height)
465 continue;
466
467 if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
468 bmode->vdisplay))
459 continue; 469 continue;
460 470
461 mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); 471 mode = drm_mode_duplicate(dev, bmode);
462 if (!mode) 472 if (!mode)
463 return 0; 473 return 0;
464 mode->vrefresh = drm_mode_vrefresh(mode); 474 mode->vrefresh = drm_mode_vrefresh(mode);
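Note on the vmwgfx_ldu.c hunk above: both the preferred mode and every builtin mode are now gated on the same VRAM check, with hdisplay * 2 passed as the per-line byte estimate. A rough sketch of that filtering pattern follows; the factor-of-two pitch estimate is carried over from the hunk, while the struct and function names are illustrative only, not driver code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct candidate_mode {
	uint32_t hdisplay;
	uint32_t vdisplay;
};

/* Illustrative filter: keep only modes that fit the connector limits and,
 * using hdisplay * 2 bytes per line as in the hunk above, also fit in VRAM.
 * Returns the number of modes copied to out[]. */
static size_t filter_modes(const struct candidate_mode *in, size_t n,
			   struct candidate_mode *out,
			   uint32_t max_w, uint32_t max_h, uint64_t vram_size)
{
	size_t kept = 0;

	for (size_t i = 0; i < n; i++) {
		uint64_t required = (uint64_t)in[i].hdisplay * 2 * in[i].vdisplay;

		if (in[i].hdisplay > max_w || in[i].vdisplay > max_h)
			continue;
		if (required >= vram_size)
			continue;
		out[kept++] = in[i];
	}
	return kept;
}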
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8c40e9979db..36e129f0023f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
765 return bo_user_size + page_array_size; 765 return bo_user_size + page_array_size;
766} 766}
767 767
768void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
769{
770 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
771 struct ttm_bo_global *glob = bo->glob;
772 struct vmw_private *dev_priv =
773 container_of(bo->bdev, struct vmw_private, bdev);
774
775 if (vmw_bo->gmr_bound) {
776 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
777 spin_lock(&glob->lru_lock);
778 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
779 spin_unlock(&glob->lru_lock);
780 vmw_bo->gmr_bound = false;
781 }
782}
783
784void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 768void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
785{ 769{
786 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 770 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
787 struct ttm_bo_global *glob = bo->glob; 771 struct ttm_bo_global *glob = bo->glob;
788 772
789 vmw_dmabuf_gmr_unbind(bo);
790 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 773 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
791 kfree(vmw_bo); 774 kfree(vmw_bo);
792} 775}
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
818 801
819 memset(vmw_bo, 0, sizeof(*vmw_bo)); 802 memset(vmw_bo, 0, sizeof(*vmw_bo));
820 803
821 INIT_LIST_HEAD(&vmw_bo->gmr_lru);
822 INIT_LIST_HEAD(&vmw_bo->validate_list); 804 INIT_LIST_HEAD(&vmw_bo->validate_list);
823 vmw_bo->gmr_id = 0;
824 vmw_bo->gmr_bound = false;
825 805
826 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 806 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
827 ttm_bo_type_device, placement, 807 ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
835 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
836 struct ttm_bo_global *glob = bo->glob; 816 struct ttm_bo_global *glob = bo->glob;
837 817
838 vmw_dmabuf_gmr_unbind(bo);
839 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 818 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
840 kfree(vmw_user_bo); 819 kfree(vmw_user_bo);
841} 820}
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
938 vmw_bo->on_validate_list = false; 917 vmw_bo->on_validate_list = false;
939} 918}
940 919
941uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
942{
943 struct vmw_dma_buffer *vmw_bo;
944
945 if (bo->mem.mem_type == TTM_PL_VRAM)
946 return SVGA_GMR_FRAMEBUFFER;
947
948 vmw_bo = vmw_dma_buffer(bo);
949
950 return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
951}
952
953void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
954{
955 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
956 vmw_bo->gmr_bound = true;
957 vmw_bo->gmr_id = id;
958}
959
960int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 920int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
961 uint32_t handle, struct vmw_dma_buffer **out) 921 uint32_t handle, struct vmw_dma_buffer **out)
962{ 922{
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
985 return 0; 945 return 0;
986} 946}
987 947
988/**
989 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
990 * when we're out of ids, causing GMR space to be allocated
991 * out of VRAM.
992 */
993
994int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
995{
996 struct ttm_bo_global *glob = dev_priv->bdev.glob;
997 int id;
998 int ret;
999
1000 do {
1001 if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
1002 return -ENOMEM;
1003
1004 spin_lock(&glob->lru_lock);
1005 ret = ida_get_new(&dev_priv->gmr_ida, &id);
1006 spin_unlock(&glob->lru_lock);
1007 } while (ret == -EAGAIN);
1008
1009 if (unlikely(ret != 0))
1010 return ret;
1011
1012 if (unlikely(id >= dev_priv->max_gmr_ids)) {
1013 spin_lock(&glob->lru_lock);
1014 ida_remove(&dev_priv->gmr_ida, id);
1015 spin_unlock(&glob->lru_lock);
1016 return -EBUSY;
1017 }
1018
1019 *p_id = (uint32_t) id;
1020 return 0;
1021}
1022
1023/* 948/*
1024 * Stream management 949 * Stream management
1025 */ 950 */
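Note on the vmwgfx_resource.c removals above: the deleted vmw_gmr_id_alloc() used the legacy two-step IDA interface, in which ida_pre_get() preallocates memory under GFP_KERNEL and ida_get_new() hands out the lowest free id, asking for a retry with -EAGAIN when its preallocation was consumed. A compressed sketch of that pattern is shown below for context; it is kernel-only API of this era, the lru_lock locking the driver wrapped around these calls is omitted, and the max_ids parameter stands in for dev_priv->max_gmr_ids.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Illustrative sketch of the legacy ida_pre_get()/ida_get_new() retry loop
 * used by the removed vmw_gmr_id_alloc(): allocate the lowest free id,
 * retrying on -EAGAIN, and give the id back if it exceeds the caller's
 * limit so the caller can fall back to another placement. */
static int example_ida_alloc(struct ida *ida, int max_ids, int *p_id)
{
	int id;
	int ret;

	do {
		if (ida_pre_get(ida, GFP_KERNEL) == 0)
			return -ENOMEM;
		ret = ida_get_new(ida, &id);
	} while (ret == -EAGAIN);

	if (ret != 0)
		return ret;

	if (id >= max_ids) {
		ida_remove(ida, id);
		return -EBUSY;
	}

	*p_id = id;
	return 0;
}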