author		Jerome Glisse <jglisse@redhat.com>	2010-04-07 06:21:19 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-04-07 20:21:19 -0400
commit		9d87fa2138d06ff400551800d67d522625033e35 (patch)
tree		284cd0f73ccb2f2fad1c71f974d4e9e4d0035e81 /drivers/gpu/drm/ttm
parent		3a89b4a9ca7ce11e3b7d5119aea917b9fc29a302 (diff)
drm/ttm: split no_wait argument in 2 GPU or reserve wait
There are cases where we want to wait only for the GPU while not waiting for other buffers to be unreserved. This patch splits the no_wait argument all the way down the whole TTM path so that the upper levels can decide what to wait on and what not.

[airlied: squashed these 4 for bisectability reasons.]

drm/radeon/kms: update to TTM no_wait splitted argument
drm/nouveau: update to TTM no_wait splitted argument
drm/vmwgfx: update to TTM no_wait splitted argument
[vmwgfx patch: Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
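To illustrate the split at a call site, here is a minimal caller-side sketch (not part of this patch); foo_pin_nowait_reserve() is an invented name, but the ttm_bo_validate() argument order follows the revised prototype in the diff below:

/* Illustrative sketch only: decline to block on another thread's
 * reservation, but remain willing to wait for the GPU to finish. */
static int foo_pin_nowait_reserve(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	/* interruptible = true, no_wait_reserve = true, no_wait_gpu = false */
	return ttm_bo_validate(bo, placement, true, true, false);
}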
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		57
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	 9
2 files changed, 38 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a79..40631e2866f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -357,7 +357,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +403,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -606,7 +607,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +616,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -638,7 +639,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +651,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +671,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +689,11 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +715,7 @@ retry:
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +766,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +789,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +859,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +983,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,7 +998,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
@@ -1002,10 +1008,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1175,7 +1182,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1256,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1839,7 +1846,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	evict_mem.mem_type = TTM_PL_SYSTEM;
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					false, false);
+					false, false, false);
 	if (unlikely(ret != 0))
 		goto out;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5ca37a58a98..865b2a826e1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -49,7 +49,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -207,7 +208,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -525,7 +527,8 @@ int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
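For completeness, a hedged sketch of what a driver's ->move() hook looks like after this change; foo_bo_move() is hypothetical, but its parameter list mirrors the bdev->driver->move() call and the ttm_bo_move_memcpy() prototype updated above:

/* Hypothetical driver move hook: no copy engine available, so both wait
 * flags are simply forwarded to the generic CPU-copy fallback. */
static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
		       bool interruptible, bool no_wait_reserve,
		       bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	return ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
				  no_wait_gpu, new_mem);
}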