Diffstat (limited to 'drivers/gpu/drm/amd')
 drivers/gpu/drm/amd/amdgpu/amdgpu.h              |  6 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c     | 13 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |  9 ---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c         |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c          | 12 ++++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c          |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c           |  5 ++++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c           |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c        |  2 +-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c    |  2 +-
 10 files changed, 38 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..700c56baf2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..fe872b82e619 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 				(le16_to_cpu(path->usConnObjectId) &
 				 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..10b5ddf2c588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream. The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..0feea347f680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint32_t flags)
 {
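
Widening the GART offset to uint64_t here, like the (u64) casts added in amdgpu_ttm.c further down, avoids computing a byte offset in a 32-bit type: once a page index shifted by PAGE_SHIFT crosses the 4 GiB mark, the high bits are lost unless the value is widened before the shift. A minimal standalone sketch of that failure mode, using hypothetical values rather than driver code (and assuming a 32-bit unsigned int, as on common platforms):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Page index sitting exactly at the 4 GiB boundary. */
	unsigned int start = 0x100000;

	/* Shift performed in 32-bit arithmetic, then widened: wraps to 0. */
	uint64_t wrong = start << PAGE_SHIFT;

	/* Widen first, then shift: yields the intended 0x100000000. */
	uint64_t right = (uint64_t)start << PAGE_SHIFT;

	printf("wrong = 0x%llx, right = 0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}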
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9b61c8ba7aaf..716f2afeb6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
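
The guard member added to struct amdgpu_mman in amdgpu.h above acts as a sentinel for the while ((++lru)->...) walks in amdgpu_ttm_lru_tail() and amdgpu_ttm_swap_lru_tail(): the loop keeps advancing through adjacent amdgpu_mman_lru slots while they still reference the old tail, and the guard, whose pointers are initialized to NULL in amdgpu_ttm_init(), can never match, so the walk stops without an explicit bounds check. A small standalone sketch of the same sentinel idea, using hypothetical names rather than the driver's structures:

#include <stdio.h>

/* Hypothetical stand-in for a per-size LRU slot: several adjacent slots may
 * share the same tail pointer, and a trailing guard slot kept at NULL serves
 * as a sentinel that terminates the update walk below.
 */
struct lru_slot {
	const char *tail;
};

static const char *update_tails(struct lru_slot *slot, const char *new_tail)
{
	const char *old = slot->tail;

	slot->tail = new_tail;
	/* Keep stepping while neighbours still hold the old tail; the guard's
	 * NULL never compares equal to old, so the loop cannot run off the end.
	 */
	while ((++slot)->tail == old)
		slot->tail = new_tail;

	return old;
}

int main(void)
{
	static const char OLD[] = "old", NEW[] = "new";
	struct lru_slot slots[] = {
		{ OLD }, { OLD }, { OLD }, { OLD },
		{ NULL }	/* guard, "don't add anything in between" */
	};
	int i;

	update_tails(&slots[0], NEW);
	for (i = 0; i < 5; i++)
		printf("slot %d: %s\n", i, slots[i].tail ? slots[i].tail : "(guard)");
	return 0;
}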
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..4aa993d19018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-error:
 	fence_put(fence);
+
+error:
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..80120fa4092c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs);
 	if (r)
-		return r;
+		goto err;
 
 	vm->page_directory_fence = NULL;
 
@@ -1565,6 +1565,9 @@ error_free_page_directory:
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+	drm_free_large(vm->page_tables);
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..a64715d90503 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..a7d3cb3fead0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					  sizeof(u32)) + inx;
 
 	pr_debug("kfd: get kernel queue doorbell\n"
-		 " doorbell offset == 0x%08d\n"
+		 " doorbell offset == 0x%08X\n"
 		 " kernel address == 0x%08lX\n",
 		 *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {