author     Christian König <christian.koenig@amd.com>	2016-09-15 08:58:48 -0400
committer  Alex Deucher <alexander.deucher@amd.com>	2016-10-25 14:38:26 -0400
commit     a7d64de659946e852cd8f2a9691a21ddbb4ebc86 (patch)
tree       23d8072cb76d150d6ab08961f233cbffde97ce1a /drivers
parent     f3fd451263f0dbfb99adaa40d7ac7cc458f9c533 (diff)
drm/amdgpu: remove adev pointer from struct amdgpu_bo v2
It's completely pointless to have two pointers to the device in the
same structure.

v2: rename function to amdgpu_ttm_adev, fix typos

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
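In short: the per-BO device pointer is dropped, and callers derive the device from the BO's ttm_bo_device, which is embedded in amdgpu_device as mman.bdev, so a single container_of() recovers it. The sketch below condenses the pattern the hunks apply; the amdgpu_bo_unpin() fragment is abridged for illustration and is not a complete excerpt of the file.

    /* New helper added to amdgpu.h */
    static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
    {
    	return container_of(bdev, struct amdgpu_device, mman.bdev);
    }

    /* Call sites: bo->adev becomes amdgpu_ttm_adev(bo->tbo.bdev), e.g. */
    int amdgpu_bo_unpin(struct amdgpu_bo *bo)
    {
    	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

    	if (!bo->pin_count) {
    		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
    		return 0;
    	}
    	/* ... remainder elided; the real function revalidates the BO,
    	 * adjusts the pin accounting and returns the result, see the
    	 * amdgpu_object.c hunks below ... */
    	return 0;
    }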
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h         |  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c      | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c     |  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c      |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c     |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      |  2
9 files changed, 65 insertions, 62 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4e51009283a4..1c874fd525a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -465,7 +465,6 @@ struct amdgpu_bo {
  */
 	struct list_head		va;
 	/* Constant after initialization */
-	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
@@ -2127,6 +2126,11 @@ struct amdgpu_device {
 
 };
 
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+	return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
 bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 187c3662b72c..504ae09d3991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u64 initial_bytes_moved;
 	uint32_t domain;
 	int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 		initial_bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -400,6 +401,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 	struct amdgpu_bo_list_entry *candidate = p->evictable;
 	struct amdgpu_bo *bo = candidate->robj;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u64 initial_bytes_moved;
 	uint32_t other;
 
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 	/* Good we can try to move this BO somewhere else */
 	amdgpu_ttm_placement_from_domain(bo, other);
-	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 		initial_bytes_moved;
 
 	if (unlikely(r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3ad0bf6ce3e4..d4fce326502b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+			   struct drm_file *file_priv)
 {
 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = abo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct amdgpu_mn_node *node = NULL;
 	struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
  */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct list_head *head;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c6754e71e1f5..6efa8d73b394 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo;
 
 	bo = container_of(tbo, struct amdgpu_bo, tbo);
 
-	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
-		mutex_lock(&bo->adev->shadow_list_lock);
+		mutex_lock(&adev->shadow_list_lock);
 		list_del_init(&bo->shadow_list);
-		mutex_unlock(&bo->adev->shadow_list_lock);
+		mutex_unlock(&adev->shadow_list_lock);
 	}
 	kfree(bo->metadata);
 	kfree(bo);
@@ -210,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
-				  abo->placements, domain, abo->flags);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+				  domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -357,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		kfree(bo);
 		return r;
 	}
-	bo->adev = adev;
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -622,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 min_offset, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 	unsigned fpfn, lpfn;
 
@@ -657,12 +660,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
 		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
 		    (!max_offset || max_offset >
-		     bo->adev->mc.visible_vram_size)) {
+		     adev->mc.visible_vram_size)) {
 			if (WARN_ON_ONCE(min_offset >
-					 bo->adev->mc.visible_vram_size))
+					 adev->mc.visible_vram_size))
 				return -EINVAL;
 			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
 		} else {
 			fpfn = min_offset >> PAGE_SHIFT;
 			lpfn = max_offset >> PAGE_SHIFT;
@@ -677,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
 	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		dev_err(adev->dev, "%p bind failed\n", bo);
 		goto error;
 	}
 
@@ -690,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (gpu_addr != NULL)
 		*gpu_addr = amdgpu_bo_gpu_offset(bo);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		adev->vram_pin_size += amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+			adev->invisible_pin_size += amdgpu_bo_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
 
 error:
@@ -708,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 
 	if (!bo->pin_count) {
-		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
 		return 0;
 	}
 	bo->pin_count--;
@@ -723,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
 	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+			adev->invisible_pin_size -= amdgpu_bo_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
 
 error:
@@ -857,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
@@ -864,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 		return;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(abo->adev, abo);
+	amdgpu_vm_bo_invalidate(adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
 
 	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct amdgpu_device *adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	unsigned long offset, size, lpfn;
 	int i, r;
@@ -887,7 +892,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	adev = abo->adev;
 	if (bo->mem.mem_type != TTM_PL_VRAM)
 		return 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..d3baf834ac24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
  */
 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r;
 
 	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
-			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+			dev_err(adev->dev, "%p reserve failed\n", bo);
 		return r;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f757f68418b7..414fa8fa8488 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
-	struct amdgpu_mman *mman;
-	struct amdgpu_device *adev;
-
-	mman = container_of(bdev, struct amdgpu_mman, bdev);
-	adev = container_of(mman, struct amdgpu_device, mman);
-	return adev;
-}
-
 
 /*
  * Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 {
 	struct amdgpu_device *adev;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+		if (adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				 * allocating address space for the BO.
 				 */
 				abo->placements[i].lpfn =
-					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+					adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
@@ -290,7 +281,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			    struct ttm_mem_reg *new_mem,
 			    struct ttm_mem_reg *old_mem)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
 	struct drm_mm_node *old_mm, *new_mm;
@@ -384,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -431,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	struct ttm_place placements;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -474,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 	if (WARN_ON_ONCE(abo->pin_count > 0))
 		return -EINVAL;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 
 	/* remember the eviction */
 	if (evict)
@@ -527,7 +518,7 @@ memcpy:
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -659,7 +650,7 @@ release_pages:
 /* prepare the sg table with the user pages */
 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned nents;
 	int r;
@@ -691,7 +682,7 @@ release_sg:
 
 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct sg_page_iter sg_iter;
 
@@ -851,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL) {
@@ -895,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -941,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -1064,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 
 static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned i, j;
 
 	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1081,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 
 static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned log2_size = min(ilog2(tbo->num_pages),
 				 AMDGPU_TTM_LRU_SIZE - 1);
 
@@ -1370,7 +1361,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       struct reservation_object *resv,
 		       struct fence **fence)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 4cf3ca755810..d67eadaa91a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -931,7 +931,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (!bo->adev->uvd.address_64_bit) {
+	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 840ac52f45b6..f4b78b66444d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1195,7 +1195,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-		     adev == bo_va->bo->adev) ? flags : 0;
+		     adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))