Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 174
1 file changed, 74 insertions(+), 100 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ad5bf86ee8a3..952e0bf3bc84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -110,7 +110,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, amdgpu_sched_jobs);
+				  rq, amdgpu_sched_jobs, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 		goto error_entity;
@@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 {
 	uint64_t addr = 0;
 
-	if (mem->mem_type != TTM_PL_TT ||
-	    amdgpu_gtt_mgr_is_allocated(mem)) {
+	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
 		addr = mm_node->start << PAGE_SHIFT;
 		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 	}
@@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	 * dst to window 1
 	 */
 	if (src->mem->mem_type == TTM_PL_TT &&
-	    !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+	    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
 		r = amdgpu_map_buffer(src->bo, src->mem,
 				      PFN_UP(cur_size + src_page_offset),
 				      src_node_start, 0, ring,
@@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	}
 
 	if (dst->mem->mem_type == TTM_PL_TT &&
-	    !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+	    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
 		r = amdgpu_map_buffer(dst->bo, dst->mem,
 				      PFN_UP(cur_size + dst_page_offset),
 				      dst_node_start, 1, ring,
@@ -467,9 +466,8 @@ error:
 	return r;
 }
 
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
@@ -489,8 +487,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -504,19 +501,18 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }
 
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
@@ -536,16 +532,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -554,10 +549,9 @@ out_cleanup:
 	return r;
 }
 
-static int amdgpu_bo_move(struct ttm_buffer_object *bo,
-			  bool evict, bool interruptible,
-			  bool no_wait_gpu,
+static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+			  struct ttm_operation_ctx *ctx,
 			  struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *abo;
@@ -592,19 +586,19 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
-		r = amdgpu_move_vram_ram(bo, evict, interruptible,
-					 no_wait_gpu, new_mem);
+		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
-		r = amdgpu_move_ram_vram(bo, evict, interruptible,
-					 no_wait_gpu, new_mem);
+		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
 	} else {
-		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+				     new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, ctx->interruptible,
+				       ctx->no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -690,7 +684,6 @@ struct amdgpu_ttm_tt {
 	struct list_head	guptasks;
 	atomic_t		mmu_invalidations;
 	uint32_t		last_set_pages;
-	struct list_head	list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -861,44 +854,35 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
+	}
 
-	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
 
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
-	return gtt && !list_empty(&gtt->list);
-}
-
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_operation_ctx ctx = { false, false };
+	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
 	struct ttm_mem_reg tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
+	uint64_t flags;
 	int r;
 
-	if (!ttm || amdgpu_ttm_is_bound(ttm))
+	if (bo->mem.mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 		return 0;
 
 	tmp = bo->mem;
@@ -912,43 +896,44 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
 		TTM_PL_FLAG_TT;
 
-	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 	if (unlikely(r))
 		return r;
 
-	r = ttm_bo_move_ttm(bo, true, false, &tmp);
-	if (unlikely(r))
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
+			     bo->ttm->pages, gtt->ttm.dma_address, flags);
+	if (unlikely(r)) {
 		ttm_bo_mem_put(bo, &tmp);
-	else
-		bo->offset = (bo->mem.start << PAGE_SHIFT) +
-			bo->bdev->man[bo->mem.mem_type].gpu_offset;
+		return r;
+	}
 
-	return r;
+	ttm_bo_mem_put(bo, &bo->mem);
+	bo->mem = tmp;
+	bo->offset = (bo->mem.start << PAGE_SHIFT) +
+		bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+	return 0;
 }
 
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_ttm_tt *gtt, *tmp;
-	struct ttm_mem_reg bo_mem;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 	uint64_t flags;
 	int r;
 
-	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
-				     flags);
-		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
-			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-				  gtt->ttm.ttm.num_pages, gtt->offset);
-			return r;
-		}
-	}
-	spin_unlock(&adev->gtt_list_lock);
-	return 0;
+	if (!gtt)
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+	if (r)
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  gtt->ttm.ttm.num_pages, gtt->offset);
+	return r;
 }
 
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -959,20 +944,14 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
 
-	if (!amdgpu_ttm_is_bound(ttm))
+	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
 	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
-		goto error_unbind;
-	}
-	list_del_init(&gtt->list);
-error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
 
@@ -1009,7 +988,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 		kfree(gtt);
 		return NULL;
 	}
-	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }
 
@@ -1348,10 +1326,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
 
-	if (amdgpu_gtt_size == -1)
-		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-			       adev->mc.mc_vram_size);
-	else
+	if (amdgpu_gtt_size == -1) {
+		struct sysinfo si;
+
+		si_meminfo(&si);
+		gtt_size = max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
+			       (uint64_t)si.totalram * si.mem_unit * 3/4);
+	} else
 		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 	if (r) {
@@ -1410,19 +1391,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 
 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	if (!adev->mman.initialized)
 		return;
+
 	amdgpu_ttm_debugfs_fini(adev);
-	if (adev->stolen_vga_memory) {
-		r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
-		if (r == 0) {
-			amdgpu_bo_unpin(adev->stolen_vga_memory);
-			amdgpu_bo_unreserve(adev->stolen_vga_memory);
-		}
-		amdgpu_bo_unref(&adev->stolen_vga_memory);
-	}
+	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+	amdgpu_fw_reserve_vram_fini(adev);
+
 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
 	if (adev->gds.mem.total_size)
@@ -1432,7 +1407,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	if (adev->gds.oa.total_size)
 		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_gart_fini(adev);
 	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
@@ -1628,7 +1602,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}