author    | Dave Airlie <airlied@redhat.com> | 2014-01-19 19:03:27 -0500
committer | Dave Airlie <airlied@redhat.com> | 2014-01-19 19:03:27 -0500
commit    | 9354eafd893f45320a37da360e1728104e49cc2f (patch)
tree      | 8cd82ac2ff70ea3a9fd97b432f10c880b1d97a4c /drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
parent    | 53dac830537b51df555ba5e7ebb236705b7eaa7c (diff)
parent    | 1985f99987ff04e1bb0405101dd8e25cf1b6b037 (diff)
Merge tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux into drm-next
Pull request of 2014-01-17
Pull request for 3.14: one not-so-urgent fix and one huge device update.
The pull request corresponds to the patches sent out on dri-devel, except:
[PATCH 02/33]: a review-tag typo, pointed out by Matt Turner, has been fixed.
[PATCH 04/33]: dropped; the new surface formats are never used.
The upcoming vmware svga2 hardware version 11 will introduce the concept
of "guest backed objects", or guest backed resources. The device will in
principle get all of its memory from the guest, which has big advantages
from the device point of view.
This means that vmwgfx contexts, shaders and surfaces need to be backed
by guest memory in the form of buffer objects called MOBs, presumably
short for MemoryOBjects, which are bound to the device in a special way;
the bind/unbind flow is sketched below.
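For orientation, here is a minimal sketch of that MOB bind/unbind
lifecycle, distilled from the vmw_ttm_bind() and vmw_ttm_unbind() hunks
in the diff further down. The example_* wrappers are hypothetical names
for this sketch; the vmw_mob_create/vmw_mob_bind/vmw_mob_unbind/
vmw_mob_destroy calls and their arguments are the ones visible in this
patch, and the include assumes their declarations land in vmwgfx_drv.h,
as the header-fixup commit in the list below suggests.

/*
 * Minimal sketch (not part of the patch): the MOB bind/unbind
 * lifecycle as implemented by the vmw_ttm_bind()/vmw_ttm_unbind()
 * hunks in the diff below.
 */
#include "vmwgfx_drv.h"

static int example_bind_mob(struct vmw_ttm_tt *vmw_be, struct ttm_tt *ttm)
{
	/* Create the MOB lazily on first bind; it is kept across rebinds. */
	if (vmw_be->mob == NULL) {
		vmw_be->mob = vmw_mob_create(ttm->num_pages);
		if (vmw_be->mob == NULL)
			return -ENOMEM;
	}

	/*
	 * Hand the guest pages, described by a struct vmw_sg_table,
	 * to the device under the id TTM assigned at placement time.
	 */
	return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
			    &vmw_be->vsgt, ttm->num_pages,
			    vmw_be->gmr_id);
}

static void example_unbind_mob(struct vmw_ttm_tt *vmw_be)
{
	/* Detach from the device first, then release the MOB itself. */
	vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
	vmw_mob_destroy(vmw_be->mob);
	vmw_be->mob = NULL;
}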
This patch series introduces guest backed object support. Some new IOCTLs
are added to allocate these new guest backed objects and to optionally
provide them with a backing MOB; a purely hypothetical sketch of what such
a call could look like from userspace follows.
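The new IOCTL interface itself is not spelled out in this message, so the
following is only a shape sketch: every name, struct field and ioctl
number below is a hypothetical placeholder rather than the real vmwgfx
UAPI; the only thing taken from the text above is the pattern of
allocating a guest backed object and optionally naming a backing MOB.

/* Hypothetical userspace sketch; placeholder names, NOT the real UAPI. */
#include <stdint.h>
#include <sys/ioctl.h>

struct example_gb_alloc_req {
	uint32_t size;        /* requested backing size in bytes */
	uint32_t mob_handle;  /* optional backing MOB, 0 = none */
	uint32_t handle;      /* out: handle of the new guest backed object */
};

/* Made-up ioctl number stand-in; the real driver defines its own. */
#define EXAMPLE_IOCTL_GB_ALLOC _IOWR('d', 0x40, struct example_gb_alloc_req)

static int example_alloc_gb_object(int drm_fd, struct example_gb_alloc_req *req)
{
	return ioctl(drm_fd, EXAMPLE_IOCTL_GB_ALLOC, req);
}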
An update to the gallium driver accompanies this change; it will
presumably be pushed in the near future to a separate mesa branch before
being merged to master.
* tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux: (33 commits)
drm/vmwgfx: Invalidate surface on non-readback unbind
drm/vmwgfx: Silence the device command verifier
drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2
drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces
drm/vmwgfx: Update otable definitions
drm/vmwgfx: Use the linux DMA api also for MOBs
drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function
drm/vmwgfx: Persistent tracking of context bindings
drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
drm/vmwgfx: Block the BIND_SHADERCONSTS command
drm/vmwgfx: Add a parameter to get max MOB memory size
drm/vmwgfx: Implement a buffer object synccpu ioctl.
drm/vmwgfx: Make sure that the multisampling is off
drm/vmwgfx: Extend the command verifier to handle guest-backed on / off
drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files
drm/vmwgfx: Enable 3D for new hardware version
drm/vmwgfx: Add new unused (by user-space) commands to the verifier
drm/vmwgfx: Validate guest-backed shader const commands
drm/vmwgfx: Add guest-backed shaders
drm/vmwgfx: Hook up guest-backed surfaces
...
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 174
1 file changed, 167 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 2d61a2d86bd7..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED |
 	TTM_PL_FLAG_NO_EVICT;
 
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_sys_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &sys_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_ne_placement_flags
+};
+
 static uint32_t evictable_placement_flags[] = {
 	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
 struct ttm_placement vmw_evictable_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.num_placement = 3,
+	.num_placement = 4,
 	.placement = evictable_placement_flags,
 	.num_busy_placement = 1,
 	.busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
 	.busy_placement = gmr_vram_placement_flags
 };
 
+struct ttm_placement vmw_mob_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_placement_flags,
+	.busy_placement = &mob_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
 	struct sg_table sgt;
 	struct vmw_sg_table vsgt;
 	uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 		viter->dma_address = &__vmw_piter_dma_addr;
 		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
+		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 	vmw_tt->mapped = false;
 }
 
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Unmap buffer object pages from the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 		return ret;
 
 	vmw_be->gmr_id = bo_mem->start;
+	vmw_be->mem_type = bo_mem->mem_type;
+
+	switch (bo_mem->mem_type) {
+	case VMW_PL_GMR:
+		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+				    ttm->num_pages, vmw_be->gmr_id);
+	case VMW_PL_MOB:
+		if (unlikely(vmw_be->mob == NULL)) {
+			vmw_be->mob =
+				vmw_mob_create(ttm->num_pages);
+			if (unlikely(vmw_be->mob == NULL))
+				return -ENOMEM;
+		}
 
-	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
-			    ttm->num_pages, vmw_be->gmr_id);
+		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+				    &vmw_be->vsgt, ttm->num_pages,
+				    vmw_be->gmr_id);
+	default:
+		BUG();
+	}
+	return 0;
 }
 
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
-	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+	switch (vmw_be->mem_type) {
+	case VMW_PL_GMR:
+		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+		break;
+	case VMW_PL_MOB:
+		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+		break;
+	default:
+		BUG();
+	}
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
 		vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
 	return 0;
 }
 
+
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 		ttm_dma_tt_fini(&vmw_be->dma_ttm);
 	else
 		ttm_tt_fini(ttm);
+
+	if (vmw_be->mob)
+		vmw_mob_destroy(vmw_be->mob);
+
 	kfree(vmw_be);
 }
 
+
 static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 
+
+	if (vmw_tt->mob) {
+		vmw_mob_destroy(vmw_tt->mob);
+		vmw_tt->mob = NULL;
+	}
+
 	vmw_ttm_unmap_dma(vmw_tt);
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
@@ -530,6 +655,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 
 	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+	vmw_be->mob = NULL;
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
 		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -571,6 +697,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		/*
 		 * "Guest Memory Regions" is an aperture like feature with
 		 * one slot per bo. There is an upper limit of the number of
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		return 0;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
 			       VMW_FENCE_WAIT_TIMEOUT);
 }
 
+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *mem)
+{
+	vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	spin_lock(&bdev->fence_lock);
+	ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bdev->fence_lock);
+}
+
+
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
 	.ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = NULL,
-	.swap_notify = NULL,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
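
A closing usage note: the vmw_bo_map_dma() and vmw_bo_sg_table() helpers
added above require the buffer object to be reserved or pinned, per their
doc-comments. Below is a hedged sketch of walking a buffer object's DMA
addresses. The vmw_bo_* helpers and vmw_piter_start() appear in this
patch; ttm_bo_reserve()/ttm_bo_unreserve() are standard TTM calls of this
era; vmw_piter_next() and vmw_piter_dma_addr() are assumed accessors
matching the iterator set up in vmw_piter_start(), and example_walk_bo_dma
is a hypothetical name for this sketch.

#include "vmwgfx_drv.h"	/* assumed to declare the helpers used below */

static int example_walk_bo_dma(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt;
	struct vmw_piter piter;
	int ret;

	/* The doc-comments above require the bo to be reserved or pinned. */
	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (ret != 0)
		return ret;

	ret = vmw_bo_map_dma(bo);	/* make the pages device-visible */
	if (ret != 0)
		goto out_unreserve;

	vsgt = vmw_bo_sg_table(bo);	/* owned by the TTM backend; do not free */
	vmw_piter_start(&piter, vsgt, 0);
	while (vmw_piter_next(&piter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&piter);
		/* ... hand addr to the device, e.g. into a MOB page table ... */
		(void)addr;
	}

	vmw_bo_unmap_dma(bo);
out_unreserve:
	ttm_bo_unreserve(bo);
	return ret;
}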