Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c         |  87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c            | 138
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h            |  41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c        |  33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c             |  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c           |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c            |  38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  | 137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c          |   3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c            | 207
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c            |  33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c        |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c       |  89
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c       |   2
15 files changed, 597 insertions, 235 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 4505e17df3f5..c9281a1b1d3b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm | |||
4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ |
5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ | 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ |
6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
7 | vmwgfx_overlay.o vmwgfx_fence.o | 7 | vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o |
8 | 8 | ||
9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | | |||
39 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | | 39 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
40 | TTM_PL_FLAG_CACHED; | 40 | TTM_PL_FLAG_CACHED; |
41 | 41 | ||
42 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | | ||
43 | TTM_PL_FLAG_CACHED; | ||
44 | |||
42 | struct ttm_placement vmw_vram_placement = { | 45 | struct ttm_placement vmw_vram_placement = { |
43 | .fpfn = 0, | 46 | .fpfn = 0, |
44 | .lpfn = 0, | 47 | .lpfn = 0, |
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = { | |||
48 | .busy_placement = &vram_placement_flags | 51 | .busy_placement = &vram_placement_flags |
49 | }; | 52 | }; |
50 | 53 | ||
54 | static uint32_t vram_gmr_placement_flags[] = { | ||
55 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, | ||
56 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | ||
57 | }; | ||
58 | |||
59 | struct ttm_placement vmw_vram_gmr_placement = { | ||
60 | .fpfn = 0, | ||
61 | .lpfn = 0, | ||
62 | .num_placement = 2, | ||
63 | .placement = vram_gmr_placement_flags, | ||
64 | .num_busy_placement = 1, | ||
65 | .busy_placement = &gmr_placement_flags | ||
66 | }; | ||
67 | |||
51 | struct ttm_placement vmw_vram_sys_placement = { | 68 | struct ttm_placement vmw_vram_sys_placement = { |
52 | .fpfn = 0, | 69 | .fpfn = 0, |
53 | .lpfn = 0, | 70 | .lpfn = 0, |
@@ -77,27 +94,53 @@ struct ttm_placement vmw_sys_placement = { | |||
77 | 94 | ||
78 | struct vmw_ttm_backend { | 95 | struct vmw_ttm_backend { |
79 | struct ttm_backend backend; | 96 | struct ttm_backend backend; |
97 | struct page **pages; | ||
98 | unsigned long num_pages; | ||
99 | struct vmw_private *dev_priv; | ||
100 | int gmr_id; | ||
80 | }; | 101 | }; |
81 | 102 | ||
82 | static int vmw_ttm_populate(struct ttm_backend *backend, | 103 | static int vmw_ttm_populate(struct ttm_backend *backend, |
83 | unsigned long num_pages, struct page **pages, | 104 | unsigned long num_pages, struct page **pages, |
84 | struct page *dummy_read_page) | 105 | struct page *dummy_read_page, |
106 | dma_addr_t *dma_addrs) | ||
85 | { | 107 | { |
108 | struct vmw_ttm_backend *vmw_be = | ||
109 | container_of(backend, struct vmw_ttm_backend, backend); | ||
110 | |||
111 | vmw_be->pages = pages; | ||
112 | vmw_be->num_pages = num_pages; | ||
113 | |||
86 | return 0; | 114 | return 0; |
87 | } | 115 | } |
88 | 116 | ||
89 | static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) | 117 | static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) |
90 | { | 118 | { |
91 | return 0; | 119 | struct vmw_ttm_backend *vmw_be = |
120 | container_of(backend, struct vmw_ttm_backend, backend); | ||
121 | |||
122 | vmw_be->gmr_id = bo_mem->start; | ||
123 | |||
124 | return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, | ||
125 | vmw_be->num_pages, vmw_be->gmr_id); | ||
92 | } | 126 | } |
93 | 127 | ||
94 | static int vmw_ttm_unbind(struct ttm_backend *backend) | 128 | static int vmw_ttm_unbind(struct ttm_backend *backend) |
95 | { | 129 | { |
130 | struct vmw_ttm_backend *vmw_be = | ||
131 | container_of(backend, struct vmw_ttm_backend, backend); | ||
132 | |||
133 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | ||
96 | return 0; | 134 | return 0; |
97 | } | 135 | } |
98 | 136 | ||
99 | static void vmw_ttm_clear(struct ttm_backend *backend) | 137 | static void vmw_ttm_clear(struct ttm_backend *backend) |
100 | { | 138 | { |
139 | struct vmw_ttm_backend *vmw_be = | ||
140 | container_of(backend, struct vmw_ttm_backend, backend); | ||
141 | |||
142 | vmw_be->pages = NULL; | ||
143 | vmw_be->num_pages = 0; | ||
101 | } | 144 | } |
102 | 145 | ||
103 | static void vmw_ttm_destroy(struct ttm_backend *backend) | 146 | static void vmw_ttm_destroy(struct ttm_backend *backend) |
@@ -125,6 +168,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) | |||
125 | return NULL; | 168 | return NULL; |
126 | 169 | ||
127 | vmw_be->backend.func = &vmw_ttm_func; | 170 | vmw_be->backend.func = &vmw_ttm_func; |
171 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); | ||
128 | 172 | ||
129 | return &vmw_be->backend; | 173 | return &vmw_be->backend; |
130 | } | 174 | } |
@@ -142,15 +186,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
142 | /* System memory */ | 186 | /* System memory */ |
143 | 187 | ||
144 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | 188 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
145 | man->available_caching = TTM_PL_MASK_CACHING; | 189 | man->available_caching = TTM_PL_FLAG_CACHED; |
146 | man->default_caching = TTM_PL_FLAG_CACHED; | 190 | man->default_caching = TTM_PL_FLAG_CACHED; |
147 | break; | 191 | break; |
148 | case TTM_PL_VRAM: | 192 | case TTM_PL_VRAM: |
149 | /* "On-card" video ram */ | 193 | /* "On-card" video ram */ |
194 | man->func = &ttm_bo_manager_func; | ||
150 | man->gpu_offset = 0; | 195 | man->gpu_offset = 0; |
151 | man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; | 196 | man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; |
152 | man->available_caching = TTM_PL_MASK_CACHING; | 197 | man->available_caching = TTM_PL_FLAG_CACHED; |
153 | man->default_caching = TTM_PL_FLAG_WC; | 198 | man->default_caching = TTM_PL_FLAG_CACHED; |
199 | break; | ||
200 | case VMW_PL_GMR: | ||
201 | /* | ||
202 | * "Guest Memory Regions" is an aperture like feature with | ||
203 | * one slot per bo. There is an upper limit of the number of | ||
204 | * slots as well as the bo size. | ||
205 | */ | ||
206 | man->func = &vmw_gmrid_manager_func; | ||
207 | man->gpu_offset = 0; | ||
208 | man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
209 | man->available_caching = TTM_PL_FLAG_CACHED; | ||
210 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
154 | break; | 211 | break; |
155 | default: | 212 | default: |
156 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | 213 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); |
@@ -174,18 +231,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |||
174 | return 0; | 231 | return 0; |
175 | } | 232 | } |
176 | 233 | ||
177 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
178 | struct ttm_mem_reg *new_mem) | ||
179 | { | ||
180 | if (new_mem->mem_type != TTM_PL_SYSTEM) | ||
181 | vmw_dmabuf_gmr_unbind(bo); | ||
182 | } | ||
183 | |||
184 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
185 | { | ||
186 | vmw_dmabuf_gmr_unbind(bo); | ||
187 | } | ||
188 | |||
189 | static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 234 | static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
190 | { | 235 | { |
191 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 236 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
@@ -200,10 +245,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg | |||
200 | return -EINVAL; | 245 | return -EINVAL; |
201 | switch (mem->mem_type) { | 246 | switch (mem->mem_type) { |
202 | case TTM_PL_SYSTEM: | 247 | case TTM_PL_SYSTEM: |
203 | /* System memory */ | 248 | case VMW_PL_GMR: |
204 | return 0; | 249 | return 0; |
205 | case TTM_PL_VRAM: | 250 | case TTM_PL_VRAM: |
206 | mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; | 251 | mem->bus.offset = mem->start << PAGE_SHIFT; |
207 | mem->bus.base = dev_priv->vram_start; | 252 | mem->bus.base = dev_priv->vram_start; |
208 | mem->bus.is_iomem = true; | 253 | mem->bus.is_iomem = true; |
209 | break; | 254 | break; |
@@ -276,8 +321,8 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
276 | .sync_obj_flush = vmw_sync_obj_flush, | 321 | .sync_obj_flush = vmw_sync_obj_flush, |
277 | .sync_obj_unref = vmw_sync_obj_unref, | 322 | .sync_obj_unref = vmw_sync_obj_unref, |
278 | .sync_obj_ref = vmw_sync_obj_ref, | 323 | .sync_obj_ref = vmw_sync_obj_ref, |
279 | .move_notify = vmw_move_notify, | 324 | .move_notify = NULL, |
280 | .swap_notify = vmw_swap_notify, | 325 | .swap_notify = NULL, |
281 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, | 326 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
282 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, | 327 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, |
283 | .io_mem_free = &vmw_ttm_io_mem_free, | 328 | .io_mem_free = &vmw_ttm_io_mem_free, |
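A note on the flow these vmwgfx_buffer.c changes set up: a bo validated with vmw_vram_gmr_placement prefers VRAM and falls back to a GMR slot; when it lands in VMW_PL_GMR, the id chosen by the gmrid manager arrives in bo->mem.start and TTM's bind call hands the page list plus that id to the device. A minimal sketch of that path, for orientation only (the helper name is made up, not part of this patch):

static int example_place_in_vram_or_gmr(struct ttm_buffer_object *bo)
{
	int ret;

	/* Prefer VRAM; fall back to a GMR slot when VRAM is contended. */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * If the bo ended up in VMW_PL_GMR, TTM has already routed it
	 * through vmw_ttm_bind(), which effectively did:
	 *
	 *	vmw_gmr_bind(dev_priv, pages, num_pages, bo->mem.start);
	 *
	 * so the device can now address the buffer by that GMR id.
	 */
	return 0;
}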
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a96ed6d9d010..96949b93d920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
260 | idr_init(&dev_priv->context_idr); | 260 | idr_init(&dev_priv->context_idr); |
261 | idr_init(&dev_priv->surface_idr); | 261 | idr_init(&dev_priv->surface_idr); |
262 | idr_init(&dev_priv->stream_idr); | 262 | idr_init(&dev_priv->stream_idr); |
263 | ida_init(&dev_priv->gmr_ida); | ||
264 | mutex_init(&dev_priv->init_mutex); | 263 | mutex_init(&dev_priv->init_mutex); |
265 | init_waitqueue_head(&dev_priv->fence_queue); | 264 | init_waitqueue_head(&dev_priv->fence_queue); |
266 | init_waitqueue_head(&dev_priv->fifo_queue); | 265 | init_waitqueue_head(&dev_priv->fifo_queue); |
267 | atomic_set(&dev_priv->fence_queue_waiters, 0); | 266 | atomic_set(&dev_priv->fence_queue_waiters, 0); |
268 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | 267 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
269 | INIT_LIST_HEAD(&dev_priv->gmr_lru); | ||
270 | 268 | ||
271 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | 269 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); |
272 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 270 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
341 | goto out_err2; | 339 | goto out_err2; |
342 | } | 340 | } |
343 | 341 | ||
342 | dev_priv->has_gmr = true; | ||
343 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
344 | dev_priv->max_gmr_ids) != 0) { | ||
345 | DRM_INFO("No GMR memory available. " | ||
346 | "Graphics memory resources are very limited.\n"); | ||
347 | dev_priv->has_gmr = false; | ||
348 | } | ||
349 | |||
344 | dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, | 350 | dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, |
345 | dev_priv->mmio_size, DRM_MTRR_WC); | 351 | dev_priv->mmio_size, DRM_MTRR_WC); |
346 | 352 | ||
@@ -440,13 +446,14 @@ out_err4: | |||
440 | out_err3: | 446 | out_err3: |
441 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 447 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
442 | dev_priv->mmio_size, DRM_MTRR_WC); | 448 | dev_priv->mmio_size, DRM_MTRR_WC); |
449 | if (dev_priv->has_gmr) | ||
450 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
443 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 451 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
444 | out_err2: | 452 | out_err2: |
445 | (void)ttm_bo_device_release(&dev_priv->bdev); | 453 | (void)ttm_bo_device_release(&dev_priv->bdev); |
446 | out_err1: | 454 | out_err1: |
447 | vmw_ttm_global_release(dev_priv); | 455 | vmw_ttm_global_release(dev_priv); |
448 | out_err0: | 456 | out_err0: |
449 | ida_destroy(&dev_priv->gmr_ida); | ||
450 | idr_destroy(&dev_priv->surface_idr); | 457 | idr_destroy(&dev_priv->surface_idr); |
451 | idr_destroy(&dev_priv->context_idr); | 458 | idr_destroy(&dev_priv->context_idr); |
452 | idr_destroy(&dev_priv->stream_idr); | 459 | idr_destroy(&dev_priv->stream_idr); |
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
478 | iounmap(dev_priv->mmio_virt); | 485 | iounmap(dev_priv->mmio_virt); |
479 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | 486 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, |
480 | dev_priv->mmio_size, DRM_MTRR_WC); | 487 | dev_priv->mmio_size, DRM_MTRR_WC); |
488 | if (dev_priv->has_gmr) | ||
489 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
481 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 490 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
482 | (void)ttm_bo_device_release(&dev_priv->bdev); | 491 | (void)ttm_bo_device_release(&dev_priv->bdev); |
483 | vmw_ttm_global_release(dev_priv); | 492 | vmw_ttm_global_release(dev_priv); |
484 | ida_destroy(&dev_priv->gmr_ida); | ||
485 | idr_destroy(&dev_priv->surface_idr); | 493 | idr_destroy(&dev_priv->surface_idr); |
486 | idr_destroy(&dev_priv->context_idr); | 494 | idr_destroy(&dev_priv->context_idr); |
487 | idr_destroy(&dev_priv->stream_idr); | 495 | idr_destroy(&dev_priv->stream_idr); |
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev) | |||
597 | static void vmw_master_init(struct vmw_master *vmaster) | 605 | static void vmw_master_init(struct vmw_master *vmaster) |
598 | { | 606 | { |
599 | ttm_lock_init(&vmaster->lock); | 607 | ttm_lock_init(&vmaster->lock); |
608 | INIT_LIST_HEAD(&vmaster->fb_surf); | ||
609 | mutex_init(&vmaster->fb_surf_mutex); | ||
600 | } | 610 | } |
601 | 611 | ||
602 | static int vmw_master_create(struct drm_device *dev, | 612 | static int vmw_master_create(struct drm_device *dev, |
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev, | |||
608 | if (unlikely(vmaster == NULL)) | 618 | if (unlikely(vmaster == NULL)) |
609 | return -ENOMEM; | 619 | return -ENOMEM; |
610 | 620 | ||
611 | ttm_lock_init(&vmaster->lock); | 621 | vmw_master_init(vmaster); |
612 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 622 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
613 | master->driver_priv = vmaster; | 623 | master->driver_priv = vmaster; |
614 | 624 | ||
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
699 | 709 | ||
700 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 710 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
701 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 711 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
712 | vmw_kms_idle_workqueues(vmaster); | ||
702 | 713 | ||
703 | if (unlikely((ret != 0))) { | 714 | if (unlikely((ret != 0))) { |
704 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 715 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
751 | * Buffer contents is moved to swappable memory. | 762 | * Buffer contents is moved to swappable memory. |
752 | */ | 763 | */ |
753 | ttm_bo_swapout_all(&dev_priv->bdev); | 764 | ttm_bo_swapout_all(&dev_priv->bdev); |
765 | |||
754 | break; | 766 | break; |
755 | case PM_POST_HIBERNATION: | 767 | case PM_POST_HIBERNATION: |
756 | case PM_POST_SUSPEND: | 768 | case PM_POST_SUSPEND: |
769 | case PM_POST_RESTORE: | ||
757 | ttm_suspend_unlock(&vmaster->lock); | 770 | ttm_suspend_unlock(&vmaster->lock); |
771 | |||
758 | break; | 772 | break; |
759 | case PM_RESTORE_PREPARE: | 773 | case PM_RESTORE_PREPARE: |
760 | break; | 774 | break; |
761 | case PM_POST_RESTORE: | ||
762 | break; | ||
763 | default: | 775 | default: |
764 | break; | 776 | break; |
765 | } | 777 | } |
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
770 | * These might not be needed with the virtual SVGA device. | 782 | * These might not be needed with the virtual SVGA device. |
771 | */ | 783 | */ |
772 | 784 | ||
773 | int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) | 785 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
774 | { | 786 | { |
787 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
788 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
789 | |||
790 | if (dev_priv->num_3d_resources != 0) { | ||
791 | DRM_INFO("Can't suspend or hibernate " | ||
792 | "while 3D resources are active.\n"); | ||
793 | return -EBUSY; | ||
794 | } | ||
795 | |||
775 | pci_save_state(pdev); | 796 | pci_save_state(pdev); |
776 | pci_disable_device(pdev); | 797 | pci_disable_device(pdev); |
777 | pci_set_power_state(pdev, PCI_D3hot); | 798 | pci_set_power_state(pdev, PCI_D3hot); |
778 | return 0; | 799 | return 0; |
779 | } | 800 | } |
780 | 801 | ||
781 | int vmw_pci_resume(struct pci_dev *pdev) | 802 | static int vmw_pci_resume(struct pci_dev *pdev) |
782 | { | 803 | { |
783 | pci_set_power_state(pdev, PCI_D0); | 804 | pci_set_power_state(pdev, PCI_D0); |
784 | pci_restore_state(pdev); | 805 | pci_restore_state(pdev); |
785 | return pci_enable_device(pdev); | 806 | return pci_enable_device(pdev); |
786 | } | 807 | } |
787 | 808 | ||
809 | static int vmw_pm_suspend(struct device *kdev) | ||
810 | { | ||
811 | struct pci_dev *pdev = to_pci_dev(kdev); | ||
812 | struct pm_message dummy; | ||
813 | |||
814 | dummy.event = 0; | ||
815 | |||
816 | return vmw_pci_suspend(pdev, dummy); | ||
817 | } | ||
818 | |||
819 | static int vmw_pm_resume(struct device *kdev) | ||
820 | { | ||
821 | struct pci_dev *pdev = to_pci_dev(kdev); | ||
822 | |||
823 | return vmw_pci_resume(pdev); | ||
824 | } | ||
825 | |||
826 | static int vmw_pm_prepare(struct device *kdev) | ||
827 | { | ||
828 | struct pci_dev *pdev = to_pci_dev(kdev); | ||
829 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
830 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
831 | |||
832 | /** | ||
833 | * Release 3d reference held by fbdev and potentially | ||
834 | * stop fifo. | ||
835 | */ | ||
836 | dev_priv->suspended = true; | ||
837 | if (dev_priv->enable_fb) | ||
838 | vmw_3d_resource_dec(dev_priv); | ||
839 | |||
840 | if (dev_priv->num_3d_resources != 0) { | ||
841 | |||
842 | DRM_INFO("Can't suspend or hibernate " | ||
843 | "while 3D resources are active.\n"); | ||
844 | |||
845 | if (dev_priv->enable_fb) | ||
846 | vmw_3d_resource_inc(dev_priv); | ||
847 | dev_priv->suspended = false; | ||
848 | return -EBUSY; | ||
849 | } | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | static void vmw_pm_complete(struct device *kdev) | ||
855 | { | ||
856 | struct pci_dev *pdev = to_pci_dev(kdev); | ||
857 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
858 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
859 | |||
860 | /** | ||
861 | * Reclaim 3d reference held by fbdev and potentially | ||
862 | * start fifo. | ||
863 | */ | ||
864 | if (dev_priv->enable_fb) | ||
865 | vmw_3d_resource_inc(dev_priv); | ||
866 | |||
867 | dev_priv->suspended = false; | ||
868 | } | ||
869 | |||
870 | static const struct dev_pm_ops vmw_pm_ops = { | ||
871 | .prepare = vmw_pm_prepare, | ||
872 | .complete = vmw_pm_complete, | ||
873 | .suspend = vmw_pm_suspend, | ||
874 | .resume = vmw_pm_resume, | ||
875 | }; | ||
876 | |||
788 | static struct drm_driver driver = { | 877 | static struct drm_driver driver = { |
789 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | | 878 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
790 | DRIVER_MODESET, | 879 | DRIVER_MODESET, |
@@ -798,8 +887,6 @@ static struct drm_driver driver = { | |||
798 | .irq_handler = vmw_irq_handler, | 887 | .irq_handler = vmw_irq_handler, |
799 | .get_vblank_counter = vmw_get_vblank_counter, | 888 | .get_vblank_counter = vmw_get_vblank_counter, |
800 | .reclaim_buffers_locked = NULL, | 889 | .reclaim_buffers_locked = NULL, |
801 | .get_map_ofs = drm_core_get_map_ofs, | ||
802 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
803 | .ioctls = vmw_ioctls, | 890 | .ioctls = vmw_ioctls, |
804 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), | 891 | .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
805 | .dma_quiescent = NULL, /*vmw_dma_quiescent, */ | 892 | .dma_quiescent = NULL, /*vmw_dma_quiescent, */ |
@@ -820,15 +907,8 @@ static struct drm_driver driver = { | |||
820 | #if defined(CONFIG_COMPAT) | 907 | #if defined(CONFIG_COMPAT) |
821 | .compat_ioctl = drm_compat_ioctl, | 908 | .compat_ioctl = drm_compat_ioctl, |
822 | #endif | 909 | #endif |
823 | }, | 910 | .llseek = noop_llseek, |
824 | .pci_driver = { | 911 | }, |
825 | .name = VMWGFX_DRIVER_NAME, | ||
826 | .id_table = vmw_pci_id_list, | ||
827 | .probe = vmw_probe, | ||
828 | .remove = vmw_remove, | ||
829 | .suspend = vmw_pci_suspend, | ||
830 | .resume = vmw_pci_resume | ||
831 | }, | ||
832 | .name = VMWGFX_DRIVER_NAME, | 912 | .name = VMWGFX_DRIVER_NAME, |
833 | .desc = VMWGFX_DRIVER_DESC, | 913 | .desc = VMWGFX_DRIVER_DESC, |
834 | .date = VMWGFX_DRIVER_DATE, | 914 | .date = VMWGFX_DRIVER_DATE, |
@@ -837,6 +917,16 @@ static struct drm_driver driver = { | |||
837 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL | 917 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL |
838 | }; | 918 | }; |
839 | 919 | ||
920 | static struct pci_driver vmw_pci_driver = { | ||
921 | .name = VMWGFX_DRIVER_NAME, | ||
922 | .id_table = vmw_pci_id_list, | ||
923 | .probe = vmw_probe, | ||
924 | .remove = vmw_remove, | ||
925 | .driver = { | ||
926 | .pm = &vmw_pm_ops | ||
927 | } | ||
928 | }; | ||
929 | |||
840 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 930 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
841 | { | 931 | { |
842 | return drm_get_pci_dev(pdev, ent, &driver); | 932 | return drm_get_pci_dev(pdev, ent, &driver); |
@@ -845,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
845 | static int __init vmwgfx_init(void) | 935 | static int __init vmwgfx_init(void) |
846 | { | 936 | { |
847 | int ret; | 937 | int ret; |
848 | ret = drm_init(&driver); | 938 | ret = drm_pci_init(&driver, &vmw_pci_driver); |
849 | if (ret) | 939 | if (ret) |
850 | DRM_ERROR("Failed initializing DRM.\n"); | 940 | DRM_ERROR("Failed initializing DRM.\n"); |
851 | return ret; | 941 | return ret; |
@@ -853,7 +943,7 @@ static int __init vmwgfx_init(void) | |||
853 | 943 | ||
854 | static void __exit vmwgfx_exit(void) | 944 | static void __exit vmwgfx_exit(void) |
855 | { | 945 | { |
856 | drm_exit(&driver); | 946 | drm_pci_exit(&driver, &vmw_pci_driver); |
857 | } | 947 | } |
858 | 948 | ||
859 | module_init(vmwgfx_init); | 949 | module_init(vmwgfx_init); |
@@ -862,3 +952,7 @@ module_exit(vmwgfx_exit); | |||
862 | MODULE_AUTHOR("VMware Inc. and others"); | 952 | MODULE_AUTHOR("VMware Inc. and others"); |
863 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); | 953 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
864 | MODULE_LICENSE("GPL and additional rights"); | 954 | MODULE_LICENSE("GPL and additional rights"); |
955 | MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." | ||
956 | __stringify(VMWGFX_DRIVER_MINOR) "." | ||
957 | __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." | ||
958 | "0"); | ||
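For readers less familiar with dev_pm_ops: the ordering the new callbacks depend on is prepare, then suspend, then (after the system wakes) resume, then complete, which is why the fbdev 3D reference is dropped in prepare and re-taken in complete rather than in the suspend/resume pair. A sketch of the cycle as this patch relies on it:

/*
 * Suspend/hibernate cycle as seen by the new hooks (sketch):
 *
 *	vmw_pm_prepare()	drop fbdev's 3D reference; return -EBUSY
 *				(vetoing the transition) if other 3D
 *				resources are still active
 *	vmw_pm_suspend()	pci_save_state(), pci_disable_device(),
 *				enter PCI_D3hot
 *		... system asleep ...
 *	vmw_pm_resume()		PCI_D0, pci_restore_state(),
 *				pci_enable_device()
 *	vmw_pm_complete()	re-take fbdev's 3D reference and clear
 *				dev_priv->suspended
 */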
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58de6393f611..10fc01f69c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,9 +39,9 @@ | |||
39 | #include "ttm/ttm_execbuf_util.h" | 39 | #include "ttm/ttm_execbuf_util.h" |
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | 41 | ||
42 | #define VMWGFX_DRIVER_DATE "20100209" | 42 | #define VMWGFX_DRIVER_DATE "20100927" |
43 | #define VMWGFX_DRIVER_MAJOR 1 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
44 | #define VMWGFX_DRIVER_MINOR 2 | 44 | #define VMWGFX_DRIVER_MINOR 4 |
45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -49,6 +49,9 @@ | |||
49 | #define VMWGFX_MAX_GMRS 2048 | 49 | #define VMWGFX_MAX_GMRS 2048 |
50 | #define VMWGFX_MAX_DISPLAYS 16 | 50 | #define VMWGFX_MAX_DISPLAYS 16 |
51 | 51 | ||
52 | #define VMW_PL_GMR TTM_PL_PRIV0 | ||
53 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 | ||
54 | |||
52 | struct vmw_fpriv { | 55 | struct vmw_fpriv { |
53 | struct drm_master *locked_master; | 56 | struct drm_master *locked_master; |
54 | struct ttm_object_file *tfile; | 57 | struct ttm_object_file *tfile; |
@@ -57,8 +60,6 @@ struct vmw_fpriv { | |||
57 | struct vmw_dma_buffer { | 60 | struct vmw_dma_buffer { |
58 | struct ttm_buffer_object base; | 61 | struct ttm_buffer_object base; |
59 | struct list_head validate_list; | 62 | struct list_head validate_list; |
60 | struct list_head gmr_lru; | ||
61 | uint32_t gmr_id; | ||
62 | bool gmr_bound; | 63 | bool gmr_bound; |
63 | uint32_t cur_validate_node; | 64 | uint32_t cur_validate_node; |
64 | bool on_validate_list; | 65 | bool on_validate_list; |
@@ -151,6 +152,8 @@ struct vmw_overlay; | |||
151 | 152 | ||
152 | struct vmw_master { | 153 | struct vmw_master { |
153 | struct ttm_lock lock; | 154 | struct ttm_lock lock; |
155 | struct mutex fb_surf_mutex; | ||
156 | struct list_head fb_surf; | ||
154 | }; | 157 | }; |
155 | 158 | ||
156 | struct vmw_vga_topology_state { | 159 | struct vmw_vga_topology_state { |
@@ -182,6 +185,7 @@ struct vmw_private { | |||
182 | uint32_t capabilities; | 185 | uint32_t capabilities; |
183 | uint32_t max_gmr_descriptors; | 186 | uint32_t max_gmr_descriptors; |
184 | uint32_t max_gmr_ids; | 187 | uint32_t max_gmr_ids; |
188 | bool has_gmr; | ||
185 | struct mutex hw_mutex; | 189 | struct mutex hw_mutex; |
186 | 190 | ||
187 | /* | 191 | /* |
@@ -260,18 +264,9 @@ struct vmw_private { | |||
260 | */ | 264 | */ |
261 | 265 | ||
262 | struct vmw_sw_context ctx; | 266 | struct vmw_sw_context ctx; |
263 | uint32_t val_seq; | ||
264 | struct mutex cmdbuf_mutex; | 267 | struct mutex cmdbuf_mutex; |
265 | 268 | ||
266 | /** | 269 | /** |
267 | * GMR management. Protected by the lru spinlock. | ||
268 | */ | ||
269 | |||
270 | struct ida gmr_ida; | ||
271 | struct list_head gmr_lru; | ||
272 | |||
273 | |||
274 | /** | ||
275 | * Operating mode. | 270 | * Operating mode. |
276 | */ | 271 | */ |
277 | 272 | ||
@@ -286,6 +281,7 @@ struct vmw_private { | |||
286 | struct vmw_master *active_master; | 281 | struct vmw_master *active_master; |
287 | struct vmw_master fbdev_master; | 282 | struct vmw_master fbdev_master; |
288 | struct notifier_block pm_nb; | 283 | struct notifier_block pm_nb; |
284 | bool suspended; | ||
289 | 285 | ||
290 | struct mutex release_mutex; | 286 | struct mutex release_mutex; |
291 | uint32_t num_3d_resources; | 287 | uint32_t num_3d_resources; |
@@ -331,7 +327,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv); | |||
331 | */ | 327 | */ |
332 | 328 | ||
333 | extern int vmw_gmr_bind(struct vmw_private *dev_priv, | 329 | extern int vmw_gmr_bind(struct vmw_private *dev_priv, |
334 | struct ttm_buffer_object *bo); | 330 | struct page *pages[], |
331 | unsigned long num_pages, | ||
332 | int gmr_id); | ||
335 | extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); | 333 | extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); |
336 | 334 | ||
337 | /** | 335 | /** |
@@ -380,14 +378,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | |||
380 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 378 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
381 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 379 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
382 | uint32_t id, struct vmw_dma_buffer **out); | 380 | uint32_t id, struct vmw_dma_buffer **out); |
383 | extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo); | ||
384 | extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id); | ||
385 | extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id); | ||
386 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | 381 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, |
387 | struct vmw_dma_buffer *bo); | 382 | struct vmw_dma_buffer *bo); |
388 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | 383 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, |
389 | struct vmw_dma_buffer *bo); | 384 | struct vmw_dma_buffer *bo); |
390 | extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); | ||
391 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 385 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
392 | struct drm_file *file_priv); | 386 | struct drm_file *file_priv); |
393 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 387 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
@@ -439,6 +433,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | |||
439 | extern struct ttm_placement vmw_vram_placement; | 433 | extern struct ttm_placement vmw_vram_placement; |
440 | extern struct ttm_placement vmw_vram_ne_placement; | 434 | extern struct ttm_placement vmw_vram_ne_placement; |
441 | extern struct ttm_placement vmw_vram_sys_placement; | 435 | extern struct ttm_placement vmw_vram_sys_placement; |
436 | extern struct ttm_placement vmw_vram_gmr_placement; | ||
442 | extern struct ttm_placement vmw_sys_placement; | 437 | extern struct ttm_placement vmw_sys_placement; |
443 | extern struct ttm_bo_driver vmw_bo_driver; | 438 | extern struct ttm_bo_driver vmw_bo_driver; |
444 | extern int vmw_dma_quiescent(struct drm_device *dev); | 439 | extern int vmw_dma_quiescent(struct drm_device *dev); |
@@ -518,6 +513,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, | |||
518 | unsigned bbp, unsigned depth); | 513 | unsigned bbp, unsigned depth); |
519 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 514 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
520 | struct drm_file *file_priv); | 515 | struct drm_file *file_priv); |
516 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster); | ||
517 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | ||
518 | uint32_t pitch, | ||
519 | uint32_t height); | ||
521 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | 520 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); |
522 | 521 | ||
523 | /** | 522 | /** |
@@ -537,6 +536,12 @@ int vmw_overlay_num_overlays(struct vmw_private *dev_priv); | |||
537 | int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); | 536 | int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); |
538 | 537 | ||
539 | /** | 538 | /** |
539 | * GMR Id manager | ||
540 | */ | ||
541 | |||
542 | extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; | ||
543 | |||
544 | /** | ||
540 | * Inline helper functions | 545 | * Inline helper functions |
541 | */ | 546 | */ |
542 | 547 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..41b95ed6dbcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
538 | reloc = &sw_context->relocs[i]; | 538 | reloc = &sw_context->relocs[i]; |
539 | validate = &sw_context->val_bufs[reloc->index]; | 539 | validate = &sw_context->val_bufs[reloc->index]; |
540 | bo = validate->bo; | 540 | bo = validate->bo; |
541 | reloc->location->offset += bo->offset; | 541 | if (bo->mem.mem_type == TTM_PL_VRAM) { |
542 | reloc->location->gmrId = vmw_dmabuf_gmr(bo); | 542 | reloc->location->offset += bo->offset; |
543 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; | ||
544 | } else | ||
545 | reloc->location->gmrId = bo->mem.start; | ||
543 | } | 546 | } |
544 | vmw_free_relocations(sw_context); | 547 | vmw_free_relocations(sw_context); |
545 | } | 548 | } |
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
563 | { | 566 | { |
564 | int ret; | 567 | int ret; |
565 | 568 | ||
566 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | ||
567 | return 0; | ||
568 | |||
569 | /** | 569 | /** |
570 | * Put BO in VRAM, only if there is space. | 570 | * Put BO in VRAM if there is space, otherwise as a GMR. |
571 | * If there is no space in VRAM and GMR ids are all used up, | ||
572 | * start evicting GMRs to make room. If the DMA buffer can't be | ||
573 | * used as a GMR, this will return -ENOMEM. | ||
571 | */ | 574 | */ |
572 | 575 | ||
573 | ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false); | 576 | ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); |
574 | if (unlikely(ret == -ERESTARTSYS)) | ||
575 | return ret; | ||
576 | |||
577 | /** | ||
578 | * Otherwise, set it up as GMR. | ||
579 | */ | ||
580 | |||
581 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | ||
582 | return 0; | ||
583 | |||
584 | ret = vmw_gmr_bind(dev_priv, bo); | ||
585 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | 577 | if (likely(ret == 0 || ret == -ERESTARTSYS)) |
586 | return ret; | 578 | return ret; |
587 | 579 | ||
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
590 | * previous contents. | 582 | * previous contents. |
591 | */ | 583 | */ |
592 | 584 | ||
585 | DRM_INFO("Falling through to VRAM.\n"); | ||
593 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); | 586 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); |
594 | return ret; | 587 | return ret; |
595 | } | 588 | } |
@@ -660,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
660 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); | 653 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); |
661 | if (unlikely(ret != 0)) | 654 | if (unlikely(ret != 0)) |
662 | goto out_err; | 655 | goto out_err; |
663 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, | 656 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
664 | dev_priv->val_seq++); | ||
665 | if (unlikely(ret != 0)) | 657 | if (unlikely(ret != 0)) |
666 | goto out_err; | 658 | goto out_err; |
667 | 659 | ||
@@ -698,6 +690,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
698 | 690 | ||
699 | fence_rep.error = ret; | 691 | fence_rep.error = ret; |
700 | fence_rep.fence_seq = (uint64_t) sequence; | 692 | fence_rep.fence_seq = (uint64_t) sequence; |
693 | fence_rep.pad64 = 0; | ||
701 | 694 | ||
702 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | 695 | user_fence_rep = (struct drm_vmw_fence_rep __user *) |
703 | (unsigned long)arg->fence_rep; | 696 | (unsigned long)arg->fence_rep; |
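The relocation change above is the user-visible half of dropping the per-bo GMR tracking: a guest pointer in the command stream is now patched purely from the bo's TTM placement. A sketch of the resulting rule (illustrative helper, not part of the patch):

static void example_patch_guest_ptr(SVGAGuestPtr *loc,
				    struct ttm_buffer_object *bo)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		/* VRAM is addressed through the special "framebuffer"
		 * GMR; the offset becomes a byte offset into VRAM. */
		loc->offset += bo->offset;
		loc->gmrId = SVGA_GMR_FRAMEBUFFER;
	} else {
		/* GMR-backed bos carry their allocated GMR id in
		 * mem.start; the offset stays relative to the GMR. */
		loc->gmrId = bo->mem.start;
	}
}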
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 409e172f4abf..bfab60c938ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, | |||
144 | return -EINVAL; | 144 | return -EINVAL; |
145 | } | 145 | } |
146 | 146 | ||
147 | if (!vmw_kms_validate_mode_vram(vmw_priv, | ||
148 | info->fix.line_length, | ||
149 | var->yoffset + var->yres)) { | ||
150 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | |||
147 | return 0; | 154 | return 0; |
148 | } | 155 | } |
149 | 156 | ||
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par) | |||
205 | SVGAFifoCmdUpdate body; | 212 | SVGAFifoCmdUpdate body; |
206 | } *cmd; | 213 | } *cmd; |
207 | 214 | ||
215 | if (vmw_priv->suspended) | ||
216 | return; | ||
217 | |||
208 | spin_lock_irqsave(&par->dirty.lock, flags); | 218 | spin_lock_irqsave(&par->dirty.lock, flags); |
209 | if (!par->dirty.active) { | 219 | if (!par->dirty.active) { |
210 | spin_unlock_irqrestore(&par->dirty.lock, flags); | 220 | spin_unlock_irqrestore(&par->dirty.lock, flags); |
@@ -470,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
470 | info->fix.smem_start = 0; | 480 | info->fix.smem_start = 0; |
471 | info->fix.smem_len = fb_size; | 481 | info->fix.smem_len = fb_size; |
472 | 482 | ||
473 | info->fix.mmio_start = 0; | ||
474 | info->fix.mmio_len = 0; | ||
475 | |||
476 | info->pseudo_palette = par->pseudo_palette; | 483 | info->pseudo_palette = par->pseudo_palette; |
477 | info->screen_base = par->vmalloc; | 484 | info->screen_base = par->vmalloc; |
478 | info->screen_size = fb_size; | 485 | info->screen_size = fb_size; |
@@ -616,7 +623,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
616 | goto err_unlock; | 623 | goto err_unlock; |
617 | 624 | ||
618 | if (bo->mem.mem_type == TTM_PL_VRAM && | 625 | if (bo->mem.mem_type == TTM_PL_VRAM && |
619 | bo->mem.mm_node->start < bo->num_pages) | 626 | bo->mem.start < bo->num_pages && |
627 | bo->mem.start > 0) | ||
620 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | 628 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, |
621 | false, false); | 629 | false, false); |
622 | 630 | ||
@@ -648,7 +656,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv) | |||
648 | par->dirty.active = false; | 656 | par->dirty.active = false; |
649 | spin_unlock_irqrestore(&par->dirty.lock, flags); | 657 | spin_unlock_irqrestore(&par->dirty.lock, flags); |
650 | 658 | ||
651 | flush_scheduled_work(); | 659 | flush_delayed_work_sync(&info->deferred_work); |
652 | 660 | ||
653 | par->bo_ptr = NULL; | 661 | par->bo_ptr = NULL; |
654 | ttm_bo_kunmap(&par->map); | 662 | ttm_bo_kunmap(&par->map); |
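The new vmw_kms_validate_mode_vram() call is defined in vmwgfx_kms.c, outside these hunks; as used here its purpose is simply to refuse fbdev geometries whose scanout footprint would not fit in VRAM. A hedged sketch of that check, under that assumption (the real helper may differ in detail):

static bool example_validate_mode_vram(struct vmw_private *dev_priv,
				       uint32_t pitch, uint32_t height)
{
	/* Assumed check: pitch * height must fit in available VRAM. */
	return (uint64_t)pitch * height <= dev_priv->vram_size;
}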
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 0fe31766e4cf..635c0ffee7fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -545,7 +545,7 @@ int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma) | |||
545 | struct drm_file *file_priv; | 545 | struct drm_file *file_priv; |
546 | struct vmw_private *dev_priv; | 546 | struct vmw_private *dev_priv; |
547 | 547 | ||
548 | file_priv = (struct drm_file *)filp->private_data; | 548 | file_priv = filp->private_data; |
549 | dev_priv = vmw_priv(file_priv->minor->dev); | 549 | dev_priv = vmw_priv(file_priv->minor->dev); |
550 | 550 | ||
551 | if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || | 551 | if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 5f8908a5d7fd..de0c5948521d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, | |||
146 | */ | 146 | */ |
147 | 147 | ||
148 | static unsigned long vmw_gmr_count_descriptors(struct page *pages[], | 148 | static unsigned long vmw_gmr_count_descriptors(struct page *pages[], |
149 | unsigned long num_pages) | 149 | unsigned long num_pages) |
150 | { | 150 | { |
151 | unsigned long prev_pfn = ~(0UL); | 151 | unsigned long prev_pfn = ~(0UL); |
152 | unsigned long pfn; | 152 | unsigned long pfn; |
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[], | |||
163 | } | 163 | } |
164 | 164 | ||
165 | int vmw_gmr_bind(struct vmw_private *dev_priv, | 165 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
166 | struct ttm_buffer_object *bo) | 166 | struct page *pages[], |
167 | unsigned long num_pages, | ||
168 | int gmr_id) | ||
167 | { | 169 | { |
168 | struct ttm_tt *ttm = bo->ttm; | ||
169 | unsigned long descriptors; | ||
170 | int ret; | ||
171 | uint32_t id; | ||
172 | struct list_head desc_pages; | 170 | struct list_head desc_pages; |
171 | int ret; | ||
173 | 172 | ||
174 | if (!(dev_priv->capabilities & SVGA_CAP_GMR)) | 173 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) |
175 | return -EINVAL; | 174 | return -EINVAL; |
176 | 175 | ||
177 | ret = ttm_tt_populate(ttm); | 176 | if (vmw_gmr_count_descriptors(pages, num_pages) > |
178 | if (unlikely(ret != 0)) | 177 | dev_priv->max_gmr_descriptors) |
179 | return ret; | ||
180 | |||
181 | descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages); | ||
182 | if (unlikely(descriptors > dev_priv->max_gmr_descriptors)) | ||
183 | return -EINVAL; | 178 | return -EINVAL; |
184 | 179 | ||
185 | INIT_LIST_HEAD(&desc_pages); | 180 | INIT_LIST_HEAD(&desc_pages); |
186 | ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages, | ||
187 | ttm->num_pages); | ||
188 | if (unlikely(ret != 0)) | ||
189 | return ret; | ||
190 | 181 | ||
191 | ret = vmw_gmr_id_alloc(dev_priv, &id); | 182 | ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages); |
192 | if (unlikely(ret != 0)) | 183 | if (unlikely(ret != 0)) |
193 | goto out_no_id; | 184 | return ret; |
194 | 185 | ||
195 | vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages); | 186 | vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages); |
196 | vmw_gmr_free_descriptors(&desc_pages); | 187 | vmw_gmr_free_descriptors(&desc_pages); |
197 | vmw_dmabuf_set_gmr(bo, id); | ||
198 | return 0; | ||
199 | 188 | ||
200 | out_no_id: | 189 | return 0; |
201 | vmw_gmr_free_descriptors(&desc_pages); | ||
202 | return ret; | ||
203 | } | 190 | } |
204 | 191 | ||
192 | |||
205 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | 193 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
206 | { | 194 | { |
207 | mutex_lock(&dev_priv->hw_mutex); | 195 | mutex_lock(&dev_priv->hw_mutex); |
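The rewritten vmw_gmr_bind() now takes the caller's page list directly and rejects the request up front when it would need more descriptors than the device advertises in max_gmr_descriptors. The descriptor count is just the number of physically contiguous runs in the page list; a sketch of that idea, mirroring vmw_gmr_count_descriptors() (whose body lies mostly outside this hunk):

static unsigned long example_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long prev_pfn = ~(0UL);
	unsigned long count = 0;
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		unsigned long pfn = page_to_pfn(pages[i]);

		/* Every break in physical contiguity starts a new
		 * GMR descriptor. */
		if (pfn != prev_pfn + 1)
			++count;
		prev_pfn = pfn;
	}
	return count;
}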
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 000000000000..ac6e0d1bd629
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,137 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include "vmwgfx_drv.h" | ||
32 | #include "ttm/ttm_module.h" | ||
33 | #include "ttm/ttm_bo_driver.h" | ||
34 | #include "ttm/ttm_placement.h" | ||
35 | #include <linux/idr.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/kernel.h> | ||
38 | |||
39 | struct vmwgfx_gmrid_man { | ||
40 | spinlock_t lock; | ||
41 | struct ida gmr_ida; | ||
42 | uint32_t max_gmr_ids; | ||
43 | }; | ||
44 | |||
45 | static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, | ||
46 | struct ttm_buffer_object *bo, | ||
47 | struct ttm_placement *placement, | ||
48 | struct ttm_mem_reg *mem) | ||
49 | { | ||
50 | struct vmwgfx_gmrid_man *gman = | ||
51 | (struct vmwgfx_gmrid_man *)man->priv; | ||
52 | int ret; | ||
53 | int id; | ||
54 | |||
55 | mem->mm_node = NULL; | ||
56 | |||
57 | do { | ||
58 | if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | spin_lock(&gman->lock); | ||
62 | ret = ida_get_new(&gman->gmr_ida, &id); | ||
63 | |||
64 | if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) { | ||
65 | ida_remove(&gman->gmr_ida, id); | ||
66 | spin_unlock(&gman->lock); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | spin_unlock(&gman->lock); | ||
71 | |||
72 | } while (ret == -EAGAIN); | ||
73 | |||
74 | if (likely(ret == 0)) { | ||
75 | mem->mm_node = gman; | ||
76 | mem->start = id; | ||
77 | } | ||
78 | |||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, | ||
83 | struct ttm_mem_reg *mem) | ||
84 | { | ||
85 | struct vmwgfx_gmrid_man *gman = | ||
86 | (struct vmwgfx_gmrid_man *)man->priv; | ||
87 | |||
88 | if (mem->mm_node) { | ||
89 | spin_lock(&gman->lock); | ||
90 | ida_remove(&gman->gmr_ida, mem->start); | ||
91 | spin_unlock(&gman->lock); | ||
92 | mem->mm_node = NULL; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | ||
97 | unsigned long p_size) | ||
98 | { | ||
99 | struct vmwgfx_gmrid_man *gman = | ||
100 | kzalloc(sizeof(*gman), GFP_KERNEL); | ||
101 | |||
102 | if (unlikely(gman == NULL)) | ||
103 | return -ENOMEM; | ||
104 | |||
105 | spin_lock_init(&gman->lock); | ||
106 | ida_init(&gman->gmr_ida); | ||
107 | gman->max_gmr_ids = p_size; | ||
108 | man->priv = (void *) gman; | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man) | ||
113 | { | ||
114 | struct vmwgfx_gmrid_man *gman = | ||
115 | (struct vmwgfx_gmrid_man *)man->priv; | ||
116 | |||
117 | if (gman) { | ||
118 | ida_destroy(&gman->gmr_ida); | ||
119 | kfree(gman); | ||
120 | } | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, | ||
125 | const char *prefix) | ||
126 | { | ||
127 | printk(KERN_INFO "%s: No debug info available for the GMR " | ||
128 | "id manager.\n", prefix); | ||
129 | } | ||
130 | |||
131 | const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { | ||
132 | vmw_gmrid_man_init, | ||
133 | vmw_gmrid_man_takedown, | ||
134 | vmw_gmrid_man_get_node, | ||
135 | vmw_gmrid_man_put_node, | ||
136 | vmw_gmrid_man_debug | ||
137 | }; | ||
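The new manager only hands out ids; the two places that wire it into TTM appear earlier in this patch and are excerpted here for orientation (not new code): vmw_init_mem_type() selects it for VMW_PL_GMR, and vmw_driver_load() sizes it with one slot per possible GMR id. Once get_node succeeds, the chosen id is what vmw_ttm_bind() later reads back from bo_mem->start.

	/* vmw_init_mem_type(), case VMW_PL_GMR: */
	man->func = &vmw_gmrid_manager_func;

	/* vmw_driver_load(): size the space as max_gmr_ids slots */
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0)
		dev_priv->has_gmr = false;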
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 1c7a316454d8..570d57775a58 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
54 | case DRM_VMW_PARAM_FIFO_CAPS: | 54 | case DRM_VMW_PARAM_FIFO_CAPS: |
55 | param->value = dev_priv->fifo.capabilities; | 55 | param->value = dev_priv->fifo.capabilities; |
56 | break; | 56 | break; |
57 | case DRM_VMW_PARAM_MAX_FB_SIZE: | ||
58 | param->value = dev_priv->vram_size; | ||
59 | break; | ||
57 | default: | 60 | default: |
58 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 61 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
59 | param->param); | 62 | param->param); |
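For completeness, a hedged sketch of how a userspace client could read the new parameter through libdrm (the argument struct comes from vmwgfx_drm.h; error handling is omitted and the helper name is made up):

#include <xf86drm.h>
#include "vmwgfx_drm.h"

static uint64_t example_query_max_fb_size(int fd)
{
	struct drm_vmw_getparam_arg arg = {
		.param = DRM_VMW_PARAM_MAX_FB_SIZE,
	};

	/* DRM_VMW_GET_PARAM is the driver's read/write "get parameter"
	 * command; the kernel fills in arg.value. */
	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
	return arg.value;
}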
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e882ba099f0c..dfe32e62bd90 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -245,7 +245,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
245 | /* TODO handle none page aligned offsets */ | 245 | /* TODO handle none page aligned offsets */ |
246 | /* TODO handle partial uploads and pitch != 256 */ | 246 | /* TODO handle partial uploads and pitch != 256 */ |
247 | /* TODO handle more then one copy (size != 64) */ | 247 | /* TODO handle more then one copy (size != 64) */ |
248 | DRM_ERROR("lazy programer, cant handle wierd stuff\n"); | 248 | DRM_ERROR("lazy programmer, can't handle weird stuff\n"); |
249 | return; | 249 | return; |
250 | } | 250 | } |
251 | 251 | ||
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface { | |||
332 | struct delayed_work d_work; | 332 | struct delayed_work d_work; |
333 | struct mutex work_lock; | 333 | struct mutex work_lock; |
334 | bool present_fs; | 334 | bool present_fs; |
335 | struct list_head head; | ||
336 | struct drm_master *master; | ||
335 | }; | 337 | }; |
336 | 338 | ||
339 | /** | ||
340 | * vmw_kms_idle_workqueues - Flush workqueues on this master | ||
341 | * | ||
342 | * @vmaster - Pointer identifying the master, for the surfaces of which | ||
343 | * we idle the dirty work queues. | ||
344 | * | ||
345 | * This function should be called with the ttm lock held in exclusive mode | ||
346 | * to idle all dirty work queues before the fifo is taken down. | ||
347 | * | ||
348 | * The work task may actually requeue itself, but after the flush returns we're | ||
349 | * sure that there's nothing to present, since the ttm lock is held in | ||
350 | * exclusive mode, so the fifo will never get used. | ||
351 | */ | ||
352 | |||
353 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster) | ||
354 | { | ||
355 | struct vmw_framebuffer_surface *entry; | ||
356 | |||
357 | mutex_lock(&vmaster->fb_surf_mutex); | ||
358 | list_for_each_entry(entry, &vmaster->fb_surf, head) { | ||
359 | if (cancel_delayed_work_sync(&entry->d_work)) | ||
360 | (void) entry->d_work.work.func(&entry->d_work.work); | ||
361 | |||
362 | (void) cancel_delayed_work_sync(&entry->d_work); | ||
363 | } | ||
364 | mutex_unlock(&vmaster->fb_surf_mutex); | ||
365 | } | ||
366 | |||
337 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | 367 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) |
338 | { | 368 | { |
339 | struct vmw_framebuffer_surface *vfb = | 369 | struct vmw_framebuffer_surface *vfbs = |
340 | vmw_framebuffer_to_vfbs(framebuffer); | 370 | vmw_framebuffer_to_vfbs(framebuffer); |
371 | struct vmw_master *vmaster = vmw_master(vfbs->master); | ||
372 | |||
373 | |||
374 | mutex_lock(&vmaster->fb_surf_mutex); | ||
375 | list_del(&vfbs->head); | ||
376 | mutex_unlock(&vmaster->fb_surf_mutex); | ||
341 | 377 | ||
342 | cancel_delayed_work_sync(&vfb->d_work); | 378 | cancel_delayed_work_sync(&vfbs->d_work); |
379 | drm_master_put(&vfbs->master); | ||
343 | drm_framebuffer_cleanup(framebuffer); | 380 | drm_framebuffer_cleanup(framebuffer); |
344 | vmw_surface_unreference(&vfb->surface); | 381 | vmw_surface_unreference(&vfbs->surface); |
345 | 382 | ||
346 | kfree(framebuffer); | 383 | kfree(vfbs); |
347 | } | 384 | } |
348 | 385 | ||
349 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | 386 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) |
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | |||
362 | SVGA3dCopyRect cr; | 399 | SVGA3dCopyRect cr; |
363 | } *cmd; | 400 | } *cmd; |
364 | 401 | ||
402 | /** | ||
403 | * Strictly we should take the ttm_lock in read mode before accessing | ||
404 | * the fifo, to make sure the fifo is present and up. However, | ||
405 | * instead we flush all workqueues under the ttm lock in exclusive mode | ||
406 | * before taking down the fifo. | ||
407 | */ | ||
365 | mutex_lock(&vfbs->work_lock); | 408 | mutex_lock(&vfbs->work_lock); |
366 | if (!vfbs->present_fs) | 409 | if (!vfbs->present_fs) |
367 | goto out_unlock; | 410 | goto out_unlock; |
@@ -392,17 +435,20 @@ out_unlock: | |||
392 | 435 | ||
393 | 436 | ||
394 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | 437 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, |
438 | struct drm_file *file_priv, | ||
395 | unsigned flags, unsigned color, | 439 | unsigned flags, unsigned color, |
396 | struct drm_clip_rect *clips, | 440 | struct drm_clip_rect *clips, |
397 | unsigned num_clips) | 441 | unsigned num_clips) |
398 | { | 442 | { |
399 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 443 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
444 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
400 | struct vmw_framebuffer_surface *vfbs = | 445 | struct vmw_framebuffer_surface *vfbs = |
401 | vmw_framebuffer_to_vfbs(framebuffer); | 446 | vmw_framebuffer_to_vfbs(framebuffer); |
402 | struct vmw_surface *surf = vfbs->surface; | 447 | struct vmw_surface *surf = vfbs->surface; |
403 | struct drm_clip_rect norect; | 448 | struct drm_clip_rect norect; |
404 | SVGA3dCopyRect *cr; | 449 | SVGA3dCopyRect *cr; |
405 | int i, inc = 1; | 450 | int i, inc = 1; |
451 | int ret; | ||
406 | 452 | ||
407 | struct { | 453 | struct { |
408 | SVGA3dCmdHeader header; | 454 | SVGA3dCmdHeader header; |
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
410 | SVGA3dCopyRect cr; | 456 | SVGA3dCopyRect cr; |
411 | } *cmd; | 457 | } *cmd; |
412 | 458 | ||
459 | if (unlikely(vfbs->master != file_priv->master)) | ||
460 | return -EINVAL; | ||
461 | |||
462 | ret = ttm_read_lock(&vmaster->lock, true); | ||
463 | if (unlikely(ret != 0)) | ||
464 | return ret; | ||
465 | |||
413 | if (!num_clips || | 466 | if (!num_clips || |
414 | !(dev_priv->fifo.capabilities & | 467 | !(dev_priv->fifo.capabilities & |
415 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | 468 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { |
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
425 | */ | 478 | */ |
426 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); | 479 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); |
427 | } | 480 | } |
481 | ttm_read_unlock(&vmaster->lock); | ||
428 | return 0; | 482 | return 0; |
429 | } | 483 | } |
430 | 484 | ||
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
442 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | 496 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); |
443 | if (unlikely(cmd == NULL)) { | 497 | if (unlikely(cmd == NULL)) { |
444 | DRM_ERROR("Fifo reserve failed.\n"); | 498 | DRM_ERROR("Fifo reserve failed.\n"); |
499 | ttm_read_unlock(&vmaster->lock); | ||
445 | return -ENOMEM; | 500 | return -ENOMEM; |
446 | } | 501 | } |
447 | 502 | ||
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
461 | } | 516 | } |
462 | 517 | ||
463 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | 518 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); |
464 | 519 | ttm_read_unlock(&vmaster->lock); | |
465 | return 0; | 520 | return 0; |
466 | } | 521 | } |
467 | 522 | ||
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { | |||
471 | .create_handle = vmw_framebuffer_create_handle, | 526 | .create_handle = vmw_framebuffer_create_handle, |
472 | }; | 527 | }; |
473 | 528 | ||
474 | int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | 529 | static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, |
475 | struct vmw_surface *surface, | 530 | struct drm_file *file_priv, |
476 | struct vmw_framebuffer **out, | 531 | struct vmw_surface *surface, |
477 | unsigned width, unsigned height) | 532 | struct vmw_framebuffer **out, |
533 | const struct drm_mode_fb_cmd | ||
534 | *mode_cmd) | ||
478 | 535 | ||
479 | { | 536 | { |
480 | struct drm_device *dev = dev_priv->dev; | 537 | struct drm_device *dev = dev_priv->dev; |
481 | struct vmw_framebuffer_surface *vfbs; | 538 | struct vmw_framebuffer_surface *vfbs; |
539 | enum SVGA3dSurfaceFormat format; | ||
540 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
482 | int ret; | 541 | int ret; |
483 | 542 | ||
543 | /* | ||
544 | * Sanity checks. | ||
545 | */ | ||
546 | |||
547 | if (unlikely(surface->mip_levels[0] != 1 || | ||
548 | surface->num_sizes != 1 || | ||
549 | surface->sizes[0].width < mode_cmd->width || | ||
550 | surface->sizes[0].height < mode_cmd->height || | ||
551 | surface->sizes[0].depth != 1)) { | ||
552 | DRM_ERROR("Incompatible surface dimensions " | ||
553 | "for requested mode.\n"); | ||
554 | return -EINVAL; | ||
555 | } | ||
556 | |||
557 | switch (mode_cmd->depth) { | ||
558 | case 32: | ||
559 | format = SVGA3D_A8R8G8B8; | ||
560 | break; | ||
561 | case 24: | ||
562 | format = SVGA3D_X8R8G8B8; | ||
563 | break; | ||
564 | case 16: | ||
565 | format = SVGA3D_R5G6B5; | ||
566 | break; | ||
567 | case 15: | ||
568 | format = SVGA3D_A1R5G5B5; | ||
569 | break; | ||
570 | default: | ||
571 | DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); | ||
572 | return -EINVAL; | ||
573 | } | ||
574 | |||
575 | if (unlikely(format != surface->format)) { | ||
576 | DRM_ERROR("Invalid surface format for requested mode.\n"); | ||
577 | return -EINVAL; | ||
578 | } | ||
579 | |||
484 | vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); | 580 | vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); |
485 | if (!vfbs) { | 581 | if (!vfbs) { |
486 | ret = -ENOMEM; | 582 | ret = -ENOMEM; |
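The new sanity checks require the surface format to match what the requested mode's depth implies. That mapping can be read off the switch above; the helper below restates it as self-contained C, with a local stand-in enum for the SVGA3D format names (the real definitions live in the SVGA headers, not here).

#include <stdio.h>

/* Local stand-in for the SVGA3D surface format names used in the patch. */
enum example_format {
	FMT_INVALID,
	FMT_A8R8G8B8,	/* depth 32 */
	FMT_X8R8G8B8,	/* depth 24 */
	FMT_R5G6B5,	/* depth 16 */
	FMT_A1R5G5B5,	/* depth 15 */
};

static enum example_format format_for_depth(unsigned depth)
{
	switch (depth) {
	case 32: return FMT_A8R8G8B8;
	case 24: return FMT_X8R8G8B8;
	case 16: return FMT_R5G6B5;
	case 15: return FMT_A1R5G5B5;
	default: return FMT_INVALID;	/* the driver returns -EINVAL here */
	}
}

int main(void)
{
	printf("depth 24 -> %d, depth 12 -> %d\n",
	       format_for_depth(24), format_for_depth(12));
	return 0;
}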
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
498 | } | 594 | } |
499 | 595 | ||
500 | /* XXX get the first 3 from the surface info */ | 596 | /* XXX get the first 3 from the surface info */ |
501 | vfbs->base.base.bits_per_pixel = 32; | 597 | vfbs->base.base.bits_per_pixel = mode_cmd->bpp; |
502 | vfbs->base.base.pitch = width * 32 / 4; | 598 | vfbs->base.base.pitch = mode_cmd->pitch; |
503 | vfbs->base.base.depth = 24; | 599 | vfbs->base.base.depth = mode_cmd->depth; |
504 | vfbs->base.base.width = width; | 600 | vfbs->base.base.width = mode_cmd->width; |
505 | vfbs->base.base.height = height; | 601 | vfbs->base.base.height = mode_cmd->height; |
506 | vfbs->base.pin = &vmw_surface_dmabuf_pin; | 602 | vfbs->base.pin = &vmw_surface_dmabuf_pin; |
507 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; | 603 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; |
508 | vfbs->surface = surface; | 604 | vfbs->surface = surface; |
605 | vfbs->master = drm_master_get(file_priv->master); | ||
509 | mutex_init(&vfbs->work_lock); | 606 | mutex_init(&vfbs->work_lock); |
607 | |||
608 | mutex_lock(&vmaster->fb_surf_mutex); | ||
510 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | 609 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); |
610 | list_add_tail(&vfbs->head, &vmaster->fb_surf); | ||
611 | mutex_unlock(&vmaster->fb_surf_mutex); | ||
612 | |||
511 | *out = &vfbs->base; | 613 | *out = &vfbs->base; |
512 | 614 | ||
513 | return 0; | 615 | return 0; |
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | |||
544 | } | 646 | } |
545 | 647 | ||
546 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | 648 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, |
649 | struct drm_file *file_priv, | ||
547 | unsigned flags, unsigned color, | 650 | unsigned flags, unsigned color, |
548 | struct drm_clip_rect *clips, | 651 | struct drm_clip_rect *clips, |
549 | unsigned num_clips) | 652 | unsigned num_clips) |
550 | { | 653 | { |
551 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 654 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
655 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
552 | struct drm_clip_rect norect; | 656 | struct drm_clip_rect norect; |
657 | int ret; | ||
553 | struct { | 658 | struct { |
554 | uint32_t header; | 659 | uint32_t header; |
555 | SVGAFifoCmdUpdate body; | 660 | SVGAFifoCmdUpdate body; |
556 | } *cmd; | 661 | } *cmd; |
557 | int i, increment = 1; | 662 | int i, increment = 1; |
558 | 663 | ||
664 | ret = ttm_read_lock(&vmaster->lock, true); | ||
665 | if (unlikely(ret != 0)) | ||
666 | return ret; | ||
667 | |||
559 | if (!num_clips) { | 668 | if (!num_clips) { |
560 | num_clips = 1; | 669 | num_clips = 1; |
561 | clips = &norect; | 670 | clips = &norect; |
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
570 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); | 679 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); |
571 | if (unlikely(cmd == NULL)) { | 680 | if (unlikely(cmd == NULL)) { |
572 | DRM_ERROR("Fifo reserve failed.\n"); | 681 | DRM_ERROR("Fifo reserve failed.\n"); |
682 | ttm_read_unlock(&vmaster->lock); | ||
573 | return -ENOMEM; | 683 | return -ENOMEM; |
574 | } | 684 | } |
575 | 685 | ||
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
582 | } | 692 | } |
583 | 693 | ||
584 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | 694 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); |
695 | ttm_read_unlock(&vmaster->lock); | ||
585 | 696 | ||
586 | return 0; | 697 | return 0; |
587 | } | 698 | } |
@@ -609,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
609 | &vmw_vram_ne_placement, | 720 | &vmw_vram_ne_placement, |
610 | false, &vmw_dmabuf_bo_free); | 721 | false, &vmw_dmabuf_bo_free); |
611 | vmw_overlay_resume_all(dev_priv); | 722 | vmw_overlay_resume_all(dev_priv); |
723 | if (unlikely(ret != 0)) | ||
724 | vfbs->buffer = NULL; | ||
612 | 725 | ||
613 | return ret; | 726 | return ret; |
614 | } | 727 | } |
@@ -619,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
619 | struct vmw_framebuffer_surface *vfbs = | 732 | struct vmw_framebuffer_surface *vfbs = |
620 | vmw_framebuffer_to_vfbs(&vfb->base); | 733 | vmw_framebuffer_to_vfbs(&vfb->base); |
621 | 734 | ||
735 | if (unlikely(vfbs->buffer == NULL)) | ||
736 | return 0; | ||
737 | |||
622 | bo = &vfbs->buffer->base; | 738 | bo = &vfbs->buffer->base; |
623 | ttm_bo_unref(&bo); | 739 | ttm_bo_unref(&bo); |
624 | vfbs->buffer = NULL; | 740 | vfbs->buffer = NULL; |
@@ -659,16 +775,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
659 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); | 775 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); |
660 | } | 776 | } |
661 | 777 | ||
662 | int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | 778 | static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, |
663 | struct vmw_dma_buffer *dmabuf, | 779 | struct vmw_dma_buffer *dmabuf, |
664 | struct vmw_framebuffer **out, | 780 | struct vmw_framebuffer **out, |
665 | unsigned width, unsigned height) | 781 | const struct drm_mode_fb_cmd |
782 | *mode_cmd) | ||
666 | 783 | ||
667 | { | 784 | { |
668 | struct drm_device *dev = dev_priv->dev; | 785 | struct drm_device *dev = dev_priv->dev; |
669 | struct vmw_framebuffer_dmabuf *vfbd; | 786 | struct vmw_framebuffer_dmabuf *vfbd; |
787 | unsigned int requested_size; | ||
670 | int ret; | 788 | int ret; |
671 | 789 | ||
790 | requested_size = mode_cmd->height * mode_cmd->pitch; | ||
791 | if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { | ||
792 | DRM_ERROR("Screen buffer object size is too small " | ||
793 | "for requested mode.\n"); | ||
794 | return -EINVAL; | ||
795 | } | ||
796 | |||
672 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); | 797 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); |
673 | if (!vfbd) { | 798 | if (!vfbd) { |
674 | ret = -ENOMEM; | 799 | ret = -ENOMEM; |
@@ -685,12 +810,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
685 | goto out_err3; | 810 | goto out_err3; |
686 | } | 811 | } |
687 | 812 | ||
688 | /* XXX get the first 3 from the surface info */ | 813 | vfbd->base.base.bits_per_pixel = mode_cmd->bpp; |
689 | vfbd->base.base.bits_per_pixel = 32; | 814 | vfbd->base.base.pitch = mode_cmd->pitch; |
690 | vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; | 815 | vfbd->base.base.depth = mode_cmd->depth; |
691 | vfbd->base.base.depth = 24; | 816 | vfbd->base.base.width = mode_cmd->width; |
692 | vfbd->base.base.width = width; | 817 | vfbd->base.base.height = mode_cmd->height; |
693 | vfbd->base.base.height = height; | ||
694 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; | 818 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; |
695 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | 819 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; |
696 | vfbd->buffer = dmabuf; | 820 | vfbd->buffer = dmabuf; |
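Before wrapping a dma buffer in a framebuffer, the code now verifies that the buffer really backs the whole mode: height times pitch bytes must fit in the buffer's pages. The standalone check below mirrors that arithmetic; a 4096-byte page size is assumed purely for the illustration.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ul	/* assumed for the illustration */

/* Return 0 if a buffer of num_pages pages can back a height x pitch mode. */
static int check_fb_fits(unsigned height, unsigned pitch, unsigned long num_pages)
{
	unsigned long requested_size = (unsigned long)height * pitch;

	if (requested_size > num_pages * EXAMPLE_PAGE_SIZE)
		return -1;	/* the driver returns -EINVAL here */
	return 0;
}

int main(void)
{
	/* 1024x768 at 32bpp: pitch = 4096 bytes, needs exactly 768 pages. */
	printf("768 pages: %d, 767 pages: %d\n",
	       check_fb_fits(768, 4096, 768), check_fb_fits(768, 4096, 767));
	return 0;
}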
@@ -719,8 +843,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
719 | struct vmw_framebuffer *vfb = NULL; | 843 | struct vmw_framebuffer *vfb = NULL; |
720 | struct vmw_surface *surface = NULL; | 844 | struct vmw_surface *surface = NULL; |
721 | struct vmw_dma_buffer *bo = NULL; | 845 | struct vmw_dma_buffer *bo = NULL; |
846 | u64 required_size; | ||
722 | int ret; | 847 | int ret; |
723 | 848 | ||
849 | /** | ||
850 | * This code should be conditioned on Screen Objects not being used. | ||
851 | * If screen objects are used, we can allocate a GMR to hold the | ||
852 | * requested framebuffer. | ||
853 | */ | ||
854 | |||
855 | required_size = mode_cmd->pitch * mode_cmd->height; | ||
856 | if (unlikely(required_size > (u64) dev_priv->vram_size)) { | ||
857 | DRM_ERROR("VRAM size is too small for requested mode.\n"); | ||
858 | return NULL; | ||
859 | } | ||
860 | |||
861 | /** | ||
862 | * End conditioned code. | ||
863 | */ | ||
864 | |||
724 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, | 865 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, |
725 | mode_cmd->handle, &surface); | 866 | mode_cmd->handle, &surface); |
726 | if (ret) | 867 | if (ret) |
@@ -729,8 +870,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
729 | if (!surface->scanout) | 870 | if (!surface->scanout) |
730 | goto err_not_scanout; | 871 | goto err_not_scanout; |
731 | 872 | ||
732 | ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, | 873 | ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface, |
733 | mode_cmd->width, mode_cmd->height); | 874 | &vfb, mode_cmd); |
734 | 875 | ||
735 | /* vmw_user_surface_lookup takes one ref so does new_fb */ | 876 | /* vmw_user_surface_lookup takes one ref so does new_fb */ |
736 | vmw_surface_unreference(&surface); | 877 | vmw_surface_unreference(&surface); |
@@ -751,7 +892,7 @@ try_dmabuf: | |||
751 | } | 892 | } |
752 | 893 | ||
753 | ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, | 894 | ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, |
754 | mode_cmd->width, mode_cmd->height); | 895 | mode_cmd); |
755 | 896 | ||
756 | /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ | 897 | /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ |
757 | vmw_dmabuf_unreference(&bo); | 898 | vmw_dmabuf_unreference(&bo); |
@@ -889,6 +1030,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) | |||
889 | vmw_priv->num_displays = vmw_read(vmw_priv, | 1030 | vmw_priv->num_displays = vmw_read(vmw_priv, |
890 | SVGA_REG_NUM_GUEST_DISPLAYS); | 1031 | SVGA_REG_NUM_GUEST_DISPLAYS); |
891 | 1032 | ||
1033 | if (vmw_priv->num_displays == 0) | ||
1034 | vmw_priv->num_displays = 1; | ||
1035 | |||
892 | for (i = 0; i < vmw_priv->num_displays; ++i) { | 1036 | for (i = 0; i < vmw_priv->num_displays; ++i) { |
893 | save = &vmw_priv->vga_save[i]; | 1037 | save = &vmw_priv->vga_save[i]; |
894 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); | 1038 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); |
@@ -997,6 +1141,13 @@ out_unlock: | |||
997 | return ret; | 1141 | return ret; |
998 | } | 1142 | } |
999 | 1143 | ||
1144 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | ||
1145 | uint32_t pitch, | ||
1146 | uint32_t height) | ||
1147 | { | ||
1148 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; | ||
1149 | } | ||
1150 | |||
1000 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | 1151 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) |
1001 | { | 1152 | { |
1002 | return 0; | 1153 | return 0; |
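vmw_kms_validate_mode_vram() becomes the single place that answers whether pitch times height fits in VRAM, and it widens both factors to 64 bits before multiplying so a large mode cannot overflow a 32-bit product. The same idiom in plain C, with made-up sizes for the demonstration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool mode_fits_vram(uint32_t pitch, uint32_t height, uint64_t vram_size)
{
	/* Widen before multiplying; a 32-bit pitch * height could wrap. */
	return (uint64_t)pitch * (uint64_t)height < vram_size;
}

int main(void)
{
	/* 2560x1600 at 32bpp needs 2560 * 4 * 1600 = 16384000 bytes. */
	printf("in 16 MiB VRAM: %d\n",
	       mode_fits_vram(2560 * 4, 1600, 16ull << 20));
	printf("in 8 MiB VRAM:  %d\n",
	       mode_fits_vram(2560 * 4, 1600, 8ull << 20));
	return 0;
}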
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 11cb39e3accb..b3a2cd5118d7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -345,7 +345,7 @@ static enum drm_connector_status | |||
345 | return connector_status_disconnected; | 345 | return connector_status_disconnected; |
346 | } | 346 | } |
347 | 347 | ||
348 | static struct drm_display_mode vmw_ldu_connector_builtin[] = { | 348 | static const struct drm_display_mode vmw_ldu_connector_builtin[] = { |
349 | /* 640x480@60Hz */ | 349 | /* 640x480@60Hz */ |
350 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | 350 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
351 | 752, 800, 0, 480, 489, 492, 525, 0, | 351 | 752, 800, 0, 480, 489, 492, 525, 0, |
@@ -427,6 +427,7 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | |||
427 | { | 427 | { |
428 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); | 428 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); |
429 | struct drm_device *dev = connector->dev; | 429 | struct drm_device *dev = connector->dev; |
430 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
430 | struct drm_display_mode *mode = NULL; | 431 | struct drm_display_mode *mode = NULL; |
431 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | 432 | struct drm_display_mode prefmode = { DRM_MODE("preferred", |
432 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | 433 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, |
@@ -443,22 +444,32 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | |||
443 | mode->hdisplay = ldu->pref_width; | 444 | mode->hdisplay = ldu->pref_width; |
444 | mode->vdisplay = ldu->pref_height; | 445 | mode->vdisplay = ldu->pref_height; |
445 | mode->vrefresh = drm_mode_vrefresh(mode); | 446 | mode->vrefresh = drm_mode_vrefresh(mode); |
446 | drm_mode_probed_add(connector, mode); | 447 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, |
448 | mode->vdisplay)) { | ||
449 | drm_mode_probed_add(connector, mode); | ||
447 | 450 | ||
448 | if (ldu->pref_mode) { | 451 | if (ldu->pref_mode) { |
449 | list_del_init(&ldu->pref_mode->head); | 452 | list_del_init(&ldu->pref_mode->head); |
450 | drm_mode_destroy(dev, ldu->pref_mode); | 453 | drm_mode_destroy(dev, ldu->pref_mode); |
451 | } | 454 | } |
452 | 455 | ||
453 | ldu->pref_mode = mode; | 456 | ldu->pref_mode = mode; |
457 | } | ||
454 | } | 458 | } |
455 | 459 | ||
456 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | 460 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { |
457 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || | 461 | const struct drm_display_mode *bmode; |
458 | vmw_ldu_connector_builtin[i].vdisplay > max_height) | 462 | |
463 | bmode = &vmw_ldu_connector_builtin[i]; | ||
464 | if (bmode->hdisplay > max_width || | ||
465 | bmode->vdisplay > max_height) | ||
466 | continue; | ||
467 | |||
468 | if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, | ||
469 | bmode->vdisplay)) | ||
459 | continue; | 470 | continue; |
460 | 471 | ||
461 | mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); | 472 | mode = drm_mode_duplicate(dev, bmode); |
462 | if (!mode) | 473 | if (!mode) |
463 | return 0; | 474 | return 0; |
464 | mode->vrefresh = drm_mode_vrefresh(mode); | 475 | mode->vrefresh = drm_mode_vrefresh(mode); |
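The connector's fill_modes path now drops both the preferred mode and any built-in mode that would not fit in VRAM, passing hdisplay * 2 as the pitch, which reads as a conservative two-bytes-per-pixel estimate (that reading is an assumption; the hunk does not spell it out). The filtering loop reduces to the sketch below, run over a tiny stand-in mode table:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct example_mode { const char *name; uint32_t hdisplay, vdisplay; };

static bool mode_fits_vram(uint32_t pitch, uint32_t height, uint64_t vram_size)
{
	return (uint64_t)pitch * (uint64_t)height < vram_size;
}

int main(void)
{
	static const struct example_mode builtin[] = {
		{ "640x480",    640,  480 },
		{ "1024x768",  1024,  768 },
		{ "1920x1200", 1920, 1200 },
	};
	uint64_t vram_size = 2ull << 20;	/* 2 MiB, deliberately small */
	unsigned i;

	for (i = 0; i < sizeof(builtin) / sizeof(builtin[0]); i++) {
		/* Same filter as the patch: pitch estimated as hdisplay * 2. */
		if (!mode_fits_vram(builtin[i].hdisplay * 2,
				    builtin[i].vdisplay, vram_size))
			continue;
		printf("keeping %s\n", builtin[i].name);
	}
	return 0;
}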
@@ -547,7 +558,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
547 | return -EINVAL; | 558 | return -EINVAL; |
548 | } | 559 | } |
549 | 560 | ||
550 | dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); | 561 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); |
551 | 562 | ||
552 | if (!dev_priv->ldu_priv) | 563 | if (!dev_priv->ldu_priv) |
553 | return -ENOMEM; | 564 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index df2036ed18d5..f1a52f9e7298 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv) | |||
585 | return -ENOSYS; | 585 | return -ENOSYS; |
586 | } | 586 | } |
587 | 587 | ||
588 | overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); | 588 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); |
589 | if (!overlay) | 589 | if (!overlay) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
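The two kmalloc() hunks above are plain bug fixes: the allocator takes the size first and the GFP flags second, so the old calls allocated a buffer sized by the numeric value of GFP_KERNEL and passed the structure size as the flags. Illustrative fragment only, restating the corrected call from the ldu hunk:

/* kmalloc() is declared in <linux/slab.h> as: void *kmalloc(size_t size, gfp_t flags); */
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
if (!dev_priv->ldu_priv)
	return -ENOMEM;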
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c8c40e9979db..5408b1b7996f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, | |||
765 | return bo_user_size + page_array_size; | 765 | return bo_user_size + page_array_size; |
766 | } | 766 | } |
767 | 767 | ||
768 | void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) | ||
769 | { | ||
770 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
771 | struct ttm_bo_global *glob = bo->glob; | ||
772 | struct vmw_private *dev_priv = | ||
773 | container_of(bo->bdev, struct vmw_private, bdev); | ||
774 | |||
775 | if (vmw_bo->gmr_bound) { | ||
776 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
777 | spin_lock(&glob->lru_lock); | ||
778 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
779 | spin_unlock(&glob->lru_lock); | ||
780 | vmw_bo->gmr_bound = false; | ||
781 | } | ||
782 | } | ||
783 | |||
784 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | 768 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) |
785 | { | 769 | { |
786 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | 770 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); |
787 | struct ttm_bo_global *glob = bo->glob; | 771 | struct ttm_bo_global *glob = bo->glob; |
788 | 772 | ||
789 | vmw_dmabuf_gmr_unbind(bo); | ||
790 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | 773 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); |
791 | kfree(vmw_bo); | 774 | kfree(vmw_bo); |
792 | } | 775 | } |
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
818 | 801 | ||
819 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | 802 | memset(vmw_bo, 0, sizeof(*vmw_bo)); |
820 | 803 | ||
821 | INIT_LIST_HEAD(&vmw_bo->gmr_lru); | ||
822 | INIT_LIST_HEAD(&vmw_bo->validate_list); | 804 | INIT_LIST_HEAD(&vmw_bo->validate_list); |
823 | vmw_bo->gmr_id = 0; | ||
824 | vmw_bo->gmr_bound = false; | ||
825 | 805 | ||
826 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 806 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
827 | ttm_bo_type_device, placement, | 807 | ttm_bo_type_device, placement, |
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | |||
835 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | 815 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
836 | struct ttm_bo_global *glob = bo->glob; | 816 | struct ttm_bo_global *glob = bo->glob; |
837 | 817 | ||
838 | vmw_dmabuf_gmr_unbind(bo); | ||
839 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | 818 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); |
840 | kfree(vmw_user_bo); | 819 | kfree(vmw_user_bo); |
841 | } | 820 | } |
@@ -883,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
883 | &vmw_vram_sys_placement, true, | 862 | &vmw_vram_sys_placement, true, |
884 | &vmw_user_dmabuf_destroy); | 863 | &vmw_user_dmabuf_destroy); |
885 | if (unlikely(ret != 0)) | 864 | if (unlikely(ret != 0)) |
886 | return ret; | 865 | goto out_no_dmabuf; |
887 | 866 | ||
888 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | 867 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); |
889 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | 868 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, |
@@ -891,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
891 | false, | 870 | false, |
892 | ttm_buffer_type, | 871 | ttm_buffer_type, |
893 | &vmw_user_dmabuf_release, NULL); | 872 | &vmw_user_dmabuf_release, NULL); |
894 | if (unlikely(ret != 0)) { | 873 | if (unlikely(ret != 0)) |
895 | ttm_bo_unref(&tmp); | 874 | goto out_no_base_object; |
896 | } else { | 875 | else { |
897 | rep->handle = vmw_user_bo->base.hash.key; | 876 | rep->handle = vmw_user_bo->base.hash.key; |
898 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | 877 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; |
899 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | 878 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; |
900 | rep->cur_gmr_offset = 0; | 879 | rep->cur_gmr_offset = 0; |
901 | } | 880 | } |
902 | ttm_bo_unref(&tmp); | ||
903 | 881 | ||
882 | out_no_base_object: | ||
883 | ttm_bo_unref(&tmp); | ||
884 | out_no_dmabuf: | ||
904 | ttm_read_unlock(&vmaster->lock); | 885 | ttm_read_unlock(&vmaster->lock); |
905 | 886 | ||
906 | return 0; | 887 | return ret; |
907 | } | 888 | } |
908 | 889 | ||
909 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 890 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
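vmw_dmabuf_alloc_ioctl() is reshaped so every failure funnels through labels that undo exactly what has already been done, and the function now returns ret instead of a hard-coded 0, so a failed ttm_base_object_init() is no longer reported to user space as success. A self-contained sketch of the same unwind shape, with stub setup steps standing in for the buffer creation and object registration:

#include <stdio.h>

/* Stand-ins for the two setup steps the ioctl performs. */
static int create_buffer(int fail_at)   { return fail_at == 1 ? -12 : 0; } /* -ENOMEM */
static int register_object(int fail_at) { return fail_at == 2 ? -22 : 0; } /* -EINVAL */

static int example_alloc_ioctl(int fail_at)
{
	int ret;

	ret = create_buffer(fail_at);
	if (ret != 0)
		goto out_no_dmabuf;		/* nothing to undo yet */

	ret = register_object(fail_at);
	if (ret != 0)
		goto out_no_base_object;	/* undo the buffer reference below */

	/* Success also falls through the cleanup labels, as in the patch. */
out_no_base_object:
	/* drop the extra buffer reference taken for registration */
out_no_dmabuf:
	/* release the read lock taken on entry */
	return ret;				/* not an unconditional 0 */
}

int main(void)
{
	printf("ok=%d, buffer fails=%d, object fails=%d\n",
	       example_alloc_ioctl(0), example_alloc_ioctl(1),
	       example_alloc_ioctl(2));
	return 0;
}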
@@ -938,25 +919,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) | |||
938 | vmw_bo->on_validate_list = false; | 919 | vmw_bo->on_validate_list = false; |
939 | } | 920 | } |
940 | 921 | ||
941 | uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) | ||
942 | { | ||
943 | struct vmw_dma_buffer *vmw_bo; | ||
944 | |||
945 | if (bo->mem.mem_type == TTM_PL_VRAM) | ||
946 | return SVGA_GMR_FRAMEBUFFER; | ||
947 | |||
948 | vmw_bo = vmw_dma_buffer(bo); | ||
949 | |||
950 | return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL; | ||
951 | } | ||
952 | |||
953 | void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id) | ||
954 | { | ||
955 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
956 | vmw_bo->gmr_bound = true; | ||
957 | vmw_bo->gmr_id = id; | ||
958 | } | ||
959 | |||
960 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 922 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
961 | uint32_t handle, struct vmw_dma_buffer **out) | 923 | uint32_t handle, struct vmw_dma_buffer **out) |
962 | { | 924 | { |
@@ -985,41 +947,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
985 | return 0; | 947 | return 0; |
986 | } | 948 | } |
987 | 949 | ||
988 | /** | ||
989 | * TODO: Implement a gmr id eviction mechanism. Currently we just fail | ||
990 | * when we're out of ids, causing GMR space to be allocated | ||
991 | * out of VRAM. | ||
992 | */ | ||
993 | |||
994 | int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) | ||
995 | { | ||
996 | struct ttm_bo_global *glob = dev_priv->bdev.glob; | ||
997 | int id; | ||
998 | int ret; | ||
999 | |||
1000 | do { | ||
1001 | if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0)) | ||
1002 | return -ENOMEM; | ||
1003 | |||
1004 | spin_lock(&glob->lru_lock); | ||
1005 | ret = ida_get_new(&dev_priv->gmr_ida, &id); | ||
1006 | spin_unlock(&glob->lru_lock); | ||
1007 | } while (ret == -EAGAIN); | ||
1008 | |||
1009 | if (unlikely(ret != 0)) | ||
1010 | return ret; | ||
1011 | |||
1012 | if (unlikely(id >= dev_priv->max_gmr_ids)) { | ||
1013 | spin_lock(&glob->lru_lock); | ||
1014 | ida_remove(&dev_priv->gmr_ida, id); | ||
1015 | spin_unlock(&glob->lru_lock); | ||
1016 | return -EBUSY; | ||
1017 | } | ||
1018 | |||
1019 | *p_id = (uint32_t) id; | ||
1020 | return 0; | ||
1021 | } | ||
1022 | |||
1023 | /* | 950 | /* |
1024 | * Stream management | 951 | * Stream management |
1025 | */ | 952 | */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 83123287c60c..1e8eedd901e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | |||
@@ -39,7 +39,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) | |||
39 | return drm_mmap(filp, vma); | 39 | return drm_mmap(filp, vma); |
40 | } | 40 | } |
41 | 41 | ||
42 | file_priv = (struct drm_file *)filp->private_data; | 42 | file_priv = filp->private_data; |
43 | dev_priv = vmw_priv(file_priv->minor->dev); | 43 | dev_priv = vmw_priv(file_priv->minor->dev); |
44 | return ttm_bo_mmap(filp, vma, &dev_priv->bdev); | 44 | return ttm_bo_mmap(filp, vma, &dev_priv->bdev); |
45 | } | 45 | } |
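The final hunk is cosmetic: filp->private_data is a void *, and C converts void * to any object pointer type implicitly, so the explicit cast added no safety, only noise. A short illustration with a stand-in struct:

#include <stdio.h>

struct drm_file_example { int minor; };

int main(void)
{
	struct drm_file_example f = { 0 };
	void *private_data = &f;

	/* No cast needed: void * converts implicitly to an object pointer. */
	struct drm_file_example *file_priv = private_data;

	printf("minor = %d\n", file_priv->minor);
	return 0;
}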