Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c'):

 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 189
 1 file changed, 129 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..f1a206df9823 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
-	struct amdgpu_mman *mman;
-	struct amdgpu_device *adev;
-
-	mman = container_of(bdev, struct amdgpu_mman, bdev);
-	adev = container_of(mman, struct amdgpu_device, mman);
-	return adev;
-}
-
 
 /*
  * Global memory.
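
The helper deleted above survives as amdgpu_ttm_adev(), which every call site in the rest of this patch switches to. A minimal sketch of that replacement, assuming it becomes a static inline (its actual home, presumably amdgpu.h, is not shown in this diff):

	static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
	{
		/* same two container_of() steps as the deleted amdgpu_get_adev() */
		struct amdgpu_mman *mman = container_of(bdev, struct amdgpu_mman, bdev);

		return container_of(mman, struct amdgpu_device, mman);
	}
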
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 {
 	struct amdgpu_device *adev;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_vram_mgr_func;
 		man->gpu_offset = adev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+		if (adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				 * allocating address space for the BO.
 				 */
 				abo->placements[i].lpfn =
-					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+					adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
@@ -260,64 +251,116 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-			bool evict, bool no_wait_gpu,
-			struct ttm_mem_reg *new_mem,
-			struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+			       struct drm_mm_node *mm_node,
+			       struct ttm_mem_reg *mem,
+			       uint64_t *addr)
 {
-	struct amdgpu_device *adev;
-	struct amdgpu_ring *ring;
-	uint64_t old_start, new_start;
-	struct fence *fence;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
-	ring = adev->mman.buffer_funcs_ring;
-
-	switch (old_mem->mem_type) {
+	switch (mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, old_mem);
+		r = amdgpu_ttm_bind(bo, mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
-		old_start = (u64)old_mem->start << PAGE_SHIFT;
-		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+		*addr = mm_node->start << PAGE_SHIFT;
+		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
 		break;
 	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
 		return -EINVAL;
 	}
-	switch (new_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
 
-	case TTM_PL_VRAM:
-		new_start = (u64)new_mem->start << PAGE_SHIFT;
-		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
+	return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+			    bool evict, bool no_wait_gpu,
+			    struct ttm_mem_reg *new_mem,
+			    struct ttm_mem_reg *old_mem)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	struct drm_mm_node *old_mm, *new_mm;
+	uint64_t old_start, old_size, new_start, new_size;
+	unsigned long num_pages;
+	struct fence *fence = NULL;
+	int r;
+
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
 	if (!ring->ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+	old_mm = old_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+	if (r)
+		return r;
+	old_size = old_mm->size;
 
-	r = amdgpu_copy_buffer(ring, old_start, new_start,
-			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence, false);
+	new_mm = new_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
 	if (r)
 		return r;
+	new_size = new_mm->size;
+
+	num_pages = new_mem->num_pages;
+	while (num_pages) {
+		unsigned long cur_pages = min(old_size, new_size);
+		struct fence *next;
+
+		r = amdgpu_copy_buffer(ring, old_start, new_start,
+				       cur_pages * PAGE_SIZE,
+				       bo->resv, &next, false);
+		if (r)
+			goto error;
+
+		fence_put(fence);
+		fence = next;
+
+		num_pages -= cur_pages;
+		if (!num_pages)
+			break;
+
+		old_size -= cur_pages;
+		if (!old_size) {
+			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+						&old_start);
+			if (r)
+				goto error;
+			old_size = old_mm->size;
+		} else {
+			old_start += cur_pages * PAGE_SIZE;
+		}
+
+		new_size -= cur_pages;
+		if (!new_size) {
+			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+						&new_start);
+			if (r)
+				goto error;
+
+			new_size = new_mm->size;
+		} else {
+			new_start += cur_pages * PAGE_SIZE;
+		}
+	}
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	fence_put(fence);
 	return r;
+
+error:
+	if (fence)
+		fence_wait(fence, false);
+	fence_put(fence);
+	return r;
 }
 
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
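
The rewrite above follows from the switch to amdgpu_vram_mgr_func earlier in this patch: a single BO's VRAM placement may now consist of several discontiguous drm_mm nodes instead of one range, so a move can no longer be a single copy. Each loop iteration copies min(old_size, new_size) pages, advances to the next source or destination node when one side runs out, and chains the copy fences, keeping only the newest. A toy user-space model of just that chunking arithmetic (struct extent stands in for drm_mm_node; sizes are in pages; names here are illustrative only):

	#include <stdio.h>

	struct extent { unsigned long start, size; };

	static void split_copy(const struct extent *old, const struct extent *new,
			       unsigned long num_pages)
	{
		unsigned long old_start = old->start, old_size = old->size;
		unsigned long new_start = new->start, new_size = new->size;

		while (num_pages) {
			/* copy the largest chunk both current nodes can cover */
			unsigned long cur = old_size < new_size ? old_size : new_size;

			printf("copy %lu pages: %lu -> %lu\n", cur, old_start, new_start);
			num_pages -= cur;
			if (!num_pages)
				break;

			old_size -= cur;
			if (!old_size) {	/* source node exhausted, step to next */
				++old;
				old_start = old->start;
				old_size = old->size;
			} else {
				old_start += cur;
			}

			new_size -= cur;
			if (!new_size) {	/* destination node exhausted, step to next */
				++new;
				new_start = new->start;
				new_size = new->size;
			} else {
				new_start += cur;
			}
		}
	}

	int main(void)
	{
		struct extent src[] = { { 0, 4 }, { 16, 4 } };	/* two 4-page nodes */
		struct extent dst[] = { { 64, 8 } };		/* one 8-page node */

		split_copy(src, dst, 8);	/* emits 0->64 (4 pages), then 16->68 (4 pages) */
		return 0;
	}

Only the final fence is handed to ttm_bo_pipeline_move(); the new error path waits on the most recently submitted copy before dropping its reference, since earlier chunks may still be in flight.
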
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	struct ttm_place placements;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 	if (WARN_ON_ONCE(abo->pin_count > 0))
 		return -EINVAL;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 
 	/* remember the eviction */
 	if (evict)
@@ -475,7 +518,7 @@ memcpy:
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -607,7 +650,7 @@ release_pages:
 /* prepare the sg table with the user pages */
 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned nents;
 	int r;
@@ -639,7 +682,7 @@ release_sg:
 
 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct sg_page_iter sg_iter;
 
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 
 static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned i, j;
 
 	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 
 static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned log2_size = min(ilog2(tbo->num_pages),
 				 AMDGPU_TTM_LRU_SIZE - 1);
 
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	return res;
 }
 
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+					    const struct ttm_place *place)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+		unsigned long num_pages = bo->mem.num_pages;
+		struct drm_mm_node *node = bo->mem.mm_node;
+
+		/* Check each drm MM node individually */
+		while (num_pages) {
+			if (place->fpfn < (node->start + node->size) &&
+			    !(place->lpfn && place->lpfn <= node->start))
+				return true;
+
+			num_pages -= node->size;
+			++node;
+		}
+
+		return false;
+	}
+
+	return ttm_bo_eviction_valuable(bo, place);
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
 	.invalidate_caches = &amdgpu_invalidate_caches,
 	.init_mem_type = &amdgpu_init_mem_type,
+	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
 	.move = &amdgpu_bo_move,
 	.verify_access = &amdgpu_verify_access,
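
The new eviction_valuable callback lets the driver veto evictions that cannot help the allocation being attempted. For split VRAM placements (bo->mem.start == AMDGPU_BO_INVALID_OFFSET, apparently how the new VRAM manager marks a BO without a single linear offset), the BO is only worth evicting if at least one of its nodes intersects the requested [fpfn, lpfn) window; lpfn == 0 means unbounded, as in struct ttm_place. A tiny stand-alone model of that per-node test:

	#include <stdbool.h>
	#include <stdio.h>

	/* Does a node spanning pages [start, start + size) intersect [fpfn, lpfn)? */
	static bool node_hits_window(unsigned long start, unsigned long size,
				     unsigned long fpfn, unsigned long lpfn)
	{
		return fpfn < start + size && !(lpfn && lpfn <= start);
	}

	int main(void)
	{
		printf("%d\n", node_hits_window(100, 10, 0, 50));	/* 0: node lies above window */
		printf("%d\n", node_hits_window(100, 10, 105, 0));	/* 1: unbounded window overlaps */
		return 0;
	}
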
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
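
The stolen VGA buffer gains AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS presumably because, with the new VRAM manager free to back a BO with multiple nodes, any buffer that needs one physically contiguous block must now request contiguity explicitly.
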
@@ -1317,7 +1386,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       struct reservation_object *resv,
 		       struct fence **fence)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 