author    | Dave Airlie <airlied@redhat.com> | 2014-01-19 19:03:27 -0500
committer | Dave Airlie <airlied@redhat.com> | 2014-01-19 19:03:27 -0500
commit    | 9354eafd893f45320a37da360e1728104e49cc2f (patch)
tree      | 8cd82ac2ff70ea3a9fd97b432f10c880b1d97a4c /drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
parent    | 53dac830537b51df555ba5e7ebb236705b7eaa7c (diff)
parent    | 1985f99987ff04e1bb0405101dd8e25cf1b6b037 (diff)
Merge tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux into drm-next
Pull request of 2014-01-17
Pull request for 3.14: one not-so-urgent fix and one huge device update.
The pull request corresponds to the patches sent out on dri-devel, except:
[PATCH 02/33]: fixed a review-tag typo pointed out by Matt Turner.
[PATCH 04/33]: dropped, since the new surface formats are never used.
The upcoming VMware svga2 hardware version 11 will introduce the concept of
"guest backed objects", or guest backed resources. The device will, in
principle, get all of its memory from the guest, which has big advantages
from the device's point of view.
This means that vmwgfx contexts, shaders and surfaces need to be backed by
guest memory in the form of buffer objects called MOBs, presumably short for
MemoryOBjects, which are bound to the device in a special way.
This patch series introduces guest backed object support. Some new IOCTLs
are added to allocate these new guest backed objects and to optionally
provide them with a backing MOB.
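As a rough illustration (not part of the patch), user space reaches these new
IOCTLs through the ordinary DRM ioctl path once the updated vmwgfx_drm.h uapi
header is available. The minimal sketch below builds the ioctl number the same
way the driver does for DRM_IOCTL_VMW_GB_SURFACE_CREATE in the diff further
down; the request fields of the argument union are not reproduced in this page,
so they are left as a comment, and the /dev/dri/card0 device path is an
assumption.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/vmwgfx_drm.h>

    /* Same construction as in vmwgfx_drv.c below. */
    #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
                     union drm_vmw_gb_surface_create_arg)

    int main(void)
    {
            union drm_vmw_gb_surface_create_arg arg;
            int fd = open("/dev/dri/card0", O_RDWR);  /* device node is an assumption */

            if (fd < 0)
                    return 1;

            memset(&arg, 0, sizeof(arg));
            /*
             * Fill in the request half of the union here (surface format,
             * size, optional backing MOB handle). Those fields live in the
             * updated vmwgfx_drm.h and are not shown in this diff, so a
             * zeroed request is only a placeholder and would be rejected.
             */
            if (ioctl(fd, DRM_IOCTL_VMW_GB_SURFACE_CREATE, &arg))
                    perror("GB_SURFACE_CREATE");

            close(fd);
            return 0;
    }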
There is an update to the gallium driver that comes with this update; it
will be pushed in the near future, presumably to a separate mesa branch,
before being merged to master.
* tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux: (33 commits)
drm/vmwgfx: Invalidate surface on non-readback unbind
drm/vmwgfx: Silence the device command verifier
drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2
drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces
drm/vmwgfx: Update otable definitions
drm/vmwgfx: Use the linux DMA api also for MOBs
drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function
drm/vmwgfx: Persistent tracking of context bindings
drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
drm/vmwgfx: Block the BIND_SHADERCONSTS command
drm/vmwgfx: Add a parameter to get max MOB memory size
drm/vmwgfx: Implement a buffer object synccpu ioctl.
drm/vmwgfx: Make sure that the multisampling is off
drm/vmwgfx: Extend the command verifier to handle guest-backed on / off
drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files
drm/vmwgfx: Enable 3D for new hardware version
drm/vmwgfx: Add new unused (by user-space) commands to the verifier
drm/vmwgfx: Validate guest-backed shader const commands
drm/vmwgfx: Add guest-backed shaders
drm/vmwgfx: Hook up guest-backed surfaces
...
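The "Add a parameter to get max MOB memory size" commit above extends the
existing DRM_VMW_GET_PARAM interface rather than adding a new ioctl. A hedged
sketch of how user space might query it follows; the parameter name
DRM_VMW_PARAM_MAX_MOB_MEMORY is an assumption about the updated vmwgfx_drm.h
(it is not shown on this page), and the device path is again an assumption.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/vmwgfx_drm.h>

    #define DRM_IOCTL_VMW_GET_PARAM \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                     struct drm_vmw_getparam_arg)

    int main(void)
    {
            struct drm_vmw_getparam_arg arg;
            int fd = open("/dev/dri/card0", O_RDWR);  /* assumption */

            if (fd < 0)
                    return 1;

            memset(&arg, 0, sizeof(arg));
            arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;  /* assumed name from this series */

            if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
                    printf("max MOB memory parameter: %llu\n",
                           (unsigned long long)arg.value);

            close(fd);
            return 0;
    }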
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 209
1 file changed, 154 insertions, 55 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7a549694e59..078b9b0d2dfe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -112,6 +112,21 @@
 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \
         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
+                 struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER \
+        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
+                struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
+                 union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
+                 union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU \
+        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
+                struct drm_vmw_synccpu_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
         VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                       vmw_kms_update_layout_ioctl,
                       DRM_MASTER | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+                      vmw_shader_define_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+                      vmw_shader_destroy_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+                      vmw_gb_surface_define_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+                      vmw_gb_surface_reference_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
+        VMW_IOCTL_DEF(VMW_SYNCCPU,
+                      vmw_user_dmabuf_synccpu_ioctl,
+                      DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities)
                 DRM_INFO(" GMR2.\n");
         if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                 DRM_INFO(" Screen Object 2.\n");
+        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+                DRM_INFO(" Command Buffers.\n");
+        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+                DRM_INFO(" Command Buffers 2.\n");
+        if (capabilities & SVGA_CAP_GBOBJECTS)
+                DRM_INFO(" Guest Backed Resources.\n");
 }
 
-
 /**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
  *
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
  *
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure, Finally it will unmap the buffer.
+ * No interruptible waits are done within this function.
  *
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * Returns an error if bo creation or initialization fails.
  */
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
+        int ret;
+        struct ttm_buffer_object *bo;
         struct ttm_bo_kmap_obj map;
         volatile SVGA3dQueryResult *result;
         bool dummy;
-        int ret;
-        struct ttm_bo_device *bdev = &dev_priv->bdev;
-        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
 
-        ttm_bo_reserve(bo, false, false, false, 0);
-        spin_lock(&bdev->fence_lock);
-        ret = ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bdev->fence_lock);
+        /*
+         * Create the bo as pinned, so that a tryreserve will
+         * immediately succeed. This is because we're the only
+         * user of the bo currently.
+         */
+        ret = ttm_bo_create(&dev_priv->bdev,
+                            PAGE_SIZE,
+                            ttm_bo_type_device,
+                            &vmw_sys_ne_placement,
+                            0, false, NULL,
+                            &bo);
+
         if (unlikely(ret != 0))
-                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
-                                         10*HZ);
+                return ret;
+
+        ret = ttm_bo_reserve(bo, false, true, false, 0);
+        BUG_ON(ret != 0);
 
         ret = ttm_bo_kmap(bo, 0, 1, &map);
         if (likely(ret == 0)) {
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
                 result->state = SVGA3D_QUERYSTATE_PENDING;
                 result->result32 = 0xff;
                 ttm_bo_kunmap(&map);
-        } else
-                DRM_ERROR("Dummy query buffer map failed.\n");
+        }
+        vmw_bo_pin(bo, false);
         ttm_bo_unreserve(bo);
-}
 
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Dummy query buffer map failed.\n");
+                ttm_bo_unref(&bo);
+        } else
+                dev_priv->dummy_query_bo = bo;
 
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
-        return ttm_bo_create(&dev_priv->bdev,
-                             PAGE_SIZE,
-                             ttm_bo_type_device,
-                             &vmw_vram_sys_placement,
-                             0, false, NULL,
-                             &dev_priv->dummy_query_bo);
+        return ret;
 }
 
-
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
         int ret;
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv)
                 return ret;
         }
         vmw_fence_fifo_up(dev_priv->fman);
+        if (dev_priv->has_mob) {
+                ret = vmw_otables_setup(dev_priv);
+                if (unlikely(ret != 0)) {
+                        DRM_ERROR("Unable to initialize "
+                                  "guest Memory OBjects.\n");
+                        goto out_no_mob;
+                }
+        }
         ret = vmw_dummy_query_bo_create(dev_priv);
         if (unlikely(ret != 0))
                 goto out_no_query_bo;
-        vmw_dummy_query_bo_prepare(dev_priv);
 
         return 0;
 
 out_no_query_bo:
+        if (dev_priv->has_mob)
+                vmw_otables_takedown(dev_priv);
+out_no_mob:
         vmw_fence_fifo_down(dev_priv->fman);
         vmw_fifo_release(dev_priv, &dev_priv->fifo);
         return ret;
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
         BUG_ON(dev_priv->pinned_bo != NULL);
 
         ttm_bo_unref(&dev_priv->dummy_query_bo);
+        if (dev_priv->has_mob)
+                vmw_otables_takedown(dev_priv);
         vmw_fence_fifo_down(dev_priv->fman);
         vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
+
 /**
  * Increase the 3d resource refcount.
  * If the count was prevously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@ out_fixup:
         return 0;
 }
 
+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev: Pointer to struct drm-device
+ *
+ * With 32-bit we can only handle 32 bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+        struct drm_device *dev = dev_priv->dev;
+
+        if (intel_iommu_enabled &&
+            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+                return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+        }
+        return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+        return 0;
+}
+#endif
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
         struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         mutex_init(&dev_priv->hw_mutex);
         mutex_init(&dev_priv->cmdbuf_mutex);
         mutex_init(&dev_priv->release_mutex);
+        mutex_init(&dev_priv->binding_mutex);
         rwlock_init(&dev_priv->resource_lock);
 
         for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
         vmw_get_initial_size(dev_priv);
 
-        if (dev_priv->capabilities & SVGA_CAP_GMR) {
-                dev_priv->max_gmr_descriptors =
-                        vmw_read(dev_priv,
-                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                 dev_priv->max_gmr_ids =
                         vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
-        }
-        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                 dev_priv->max_gmr_pages =
                         vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                 dev_priv->memory_size =
@@ -598,23 +666,40 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                  */
                 dev_priv->memory_size = 512*1024*1024;
         }
+        dev_priv->max_mob_pages = 0;
+        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+                uint64_t mem_size =
+                        vmw_read(dev_priv,
+                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+                dev_priv->prim_bb_mem =
+                        vmw_read(dev_priv,
+                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+        } else
+                dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+        ret = vmw_dma_masks(dev_priv);
+        if (unlikely(ret != 0))
+                goto out_err0;
+
+        if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
         mutex_unlock(&dev_priv->hw_mutex);
 
         vmw_print_capabilities(dev_priv->capabilities);
 
-        if (dev_priv->capabilities & SVGA_CAP_GMR) {
+        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                 DRM_INFO("Max GMR ids is %u\n",
                          (unsigned)dev_priv->max_gmr_ids);
-                DRM_INFO("Max GMR descriptors is %u\n",
-                         (unsigned)dev_priv->max_gmr_descriptors);
-        }
-        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                 DRM_INFO("Max number of GMR pages is %u\n",
                          (unsigned)dev_priv->max_gmr_pages);
                 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                          (unsigned)dev_priv->memory_size / 1024);
         }
+        DRM_INFO("Maximum display memory size is %u kiB\n",
+                 dev_priv->prim_bb_mem / 1024);
         DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                  dev_priv->vram_start, dev_priv->vram_size / 1024);
         DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +734,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         dev_priv->has_gmr = true;
         if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
             refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-                                         dev_priv->max_gmr_ids) != 0) {
+                                         VMW_PL_GMR) != 0) {
                 DRM_INFO("No GMR memory available. "
                          "Graphics memory resources are very limited.\n");
                 dev_priv->has_gmr = false;
         }
 
+        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+                dev_priv->has_mob = true;
+                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+                                   VMW_PL_MOB) != 0) {
+                        DRM_INFO("No MOB memory available. "
+                                 "3D will be disabled.\n");
+                        dev_priv->has_mob = false;
+                }
+        }
+
         dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                                dev_priv->mmio_size);
 
@@ -757,6 +852,8 @@ out_err4:
         iounmap(dev_priv->mmio_virt);
 out_err3:
         arch_phys_wc_del(dev_priv->mmio_mtrr);
+        if (dev_priv->has_mob)
+                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
         if (dev_priv->has_gmr)
                 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
         (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +898,8 @@ static int vmw_driver_unload(struct drm_device *dev)
         ttm_object_device_release(&dev_priv->tdev);
         iounmap(dev_priv->mmio_virt);
         arch_phys_wc_del(dev_priv->mmio_mtrr);
+        if (dev_priv->has_mob)
+                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
         if (dev_priv->has_gmr)
                 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
         (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);