author    Dave Airlie <airlied@redhat.com>    2018-07-26 22:31:07 -0400
committer Dave Airlie <airlied@redhat.com>    2018-07-26 22:31:48 -0400
commit    6d52aacd92c60331ec8c3117522f4301b5195e28 (patch)
tree      0f2584c46d269f9537bd2fe4e018313c1bb0056d
parent    daa9897560e20f397af602b7384a014546fc422c (diff)
parent    586092ab4b768b01b3184d9a2541e2cf9a8d9740 (diff)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
Updates for 4.19. Mostly bug fixes and cleanups. Highlights:

- Internal API cleanup in GPU scheduler
- Decouple i2c and aux abstractions in DC
- Update maintainers
- Misc cleanups
- Misc bug fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180725215326.2709-1-alexander.deucher@amd.com
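The scheduler cleanup called out above changes the drm_sched entity API: entity flush/fini/destroy and job init no longer take a scheduler pointer, and callers reach the scheduler through the entity's run queue. A condensed before/after sketch of the caller pattern, pieced together from the amdgpu hunks below (not a standalone program):

	/* before: scheduler passed explicitly alongside the entity */
	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
	drm_sched_entity_destroy(&ring->sched, &ctx->rings[i].entity);
	ring = to_amdgpu_ring(entity->sched);

	/* after: the entity alone is enough; the scheduler is derived
	 * from the run queue the entity is attached to */
	r = drm_sched_job_init(&job->base, entity, owner);
	drm_sched_entity_destroy(&ctx->rings[i].entity);
	ring = to_amdgpu_ring(entity->rq->sched);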
-rw-r--r-- MAINTAINERS | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 204
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 942
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 111
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 61
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 48
-rw-r--r-- drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 113
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 5
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 3
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 4
-rw-r--r-- drivers/gpu/drm/scheduler/gpu_scheduler.c | 33
-rw-r--r-- drivers/gpu/drm/scheduler/sched_fence.c | 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c | 4
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gem.c | 2
-rw-r--r-- include/drm/gpu_scheduler.h | 12
58 files changed, 1933 insertions, 292 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 07d1576fc766..93f189f0d60d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -728,6 +728,14 @@ S:	Supported
 F:	drivers/crypto/ccp/
 F:	include/linux/ccp.h
 
+AMD DISPLAY CORE
+M:	Harry Wentland <harry.wentland@amd.com>
+M:	Leo Li <sunpeng.li@amd.com>
+L:	amd-gfx@lists.freedesktop.org
+T:	git git://people.freedesktop.org/~agd5f/linux
+S:	Supported
+F:	drivers/gpu/drm/amd/display/
+
 AMD FAM15H PROCESSOR POWER MONITORING DRIVER
 M:	Huang Rui <ray.huang@amd.com>
 L:	linux-hwmon@vger.kernel.org
@@ -777,6 +785,14 @@ F:	drivers/gpu/drm/amd/include/vi_structs.h
 F:	drivers/gpu/drm/amd/include/v9_structs.h
 F:	include/uapi/linux/kfd_ioctl.h
 
+AMD POWERPLAY
+M:	Rex Zhu <rex.zhu@amd.com>
+M:	Evan Quan <evan.quan@amd.com>
+L:	amd-gfx@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/amd/powerplay/
+T:	git git://people.freedesktop.org/~agd5f/linux
+
 AMD SEATTLE DEVICE TREE SUPPORT
 M:	Brijesh Singh <brijeshkumar.singh@amd.com>
 M:	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@@ -4883,7 +4899,8 @@ F:	Documentation/gpu/xen-front.rst
 
 DRM TTM SUBSYSTEM
 M:	Christian Koenig <christian.koenig@amd.com>
-M:	Roger He <Hongbo.He@amd.com>
+M:	Huang Rui <ray.huang@amd.com>
+M:	Junwei Zhang <Jerry.Zhang@amd.com>
 T:	git git://people.freedesktop.org/~agd5f/linux
 S:	Maintained
 L:	dri-devel@lists.freedesktop.org
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 44f62fda4022..0283e2b3c851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1801,8 +1801,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
 				 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0d8c3fc6eace..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			       struct acpi_bus_event *event)
 {
 	struct amdgpu_atif *atif = adev->atif;
-	struct atif_sbios_requests req;
 	int count;
 
 	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		/* Not our event */
 		return NOTIFY_DONE;
 
-	/* Check pending SBIOS requests */
-	count = amdgpu_atif_get_sbios_requests(atif, &req);
-
-	if (count <= 0)
-		return NOTIFY_DONE;
+	if (atif->functions.sbios_requests) {
+		struct atif_sbios_requests req;
+
+		/* Check pending SBIOS requests */
+		count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+		if (count <= 0)
+			return NOTIFY_DONE;
 
-	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+		DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
-		struct amdgpu_encoder *enc = atif->encoder_for_bl;
+		/* todo: add DC handling */
+		if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+		    !amdgpu_device_has_dc_support(adev)) {
+			struct amdgpu_encoder *enc = atif->encoder_for_bl;
 
-		if (enc) {
-			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+			if (enc) {
+				struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
 
-			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
-					 req.backlight_level);
+				DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+						 req.backlight_level);
 
-			amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+				amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-			backlight_force_update(dig->bl_dev,
-					       BACKLIGHT_UPDATE_HOTKEY);
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
 #endif
+			}
 		}
-	}
-	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-		if ((adev->flags & AMD_IS_PX) &&
-		    amdgpu_atpx_dgpu_req_power_for_displays()) {
-			pm_runtime_get_sync(adev->ddev->dev);
-			/* Just fire off a uevent and let userspace tell us what to do */
-			drm_helper_hpd_irq_event(adev->ddev);
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+		if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+			if ((adev->flags & AMD_IS_PX) &&
+			    amdgpu_atpx_dgpu_req_power_for_displays()) {
+				pm_runtime_get_sync(adev->ddev->dev);
+				/* Just fire off a uevent and let userspace tell us what to do */
+				drm_helper_hpd_irq_event(adev->ddev);
+				pm_runtime_mark_last_busy(adev->ddev->dev);
+				pm_runtime_put_autosuspend(adev->ddev->dev);
+			}
 		}
+		/* TODO: check other events */
 	}
-	/* TODO: check other events */
 
 	/* We've handled the event, stop the notifier chain. The ACPI interface
 	 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
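Condensed, the handler rework above gates the SBIOS query on the ATIF function mask and moves the request struct into that scope instead of querying unconditionally; the brightness path additionally bails out when DC drives the display. A sketch only, see the hunk above for the full logic:

	if (atif->functions.sbios_requests) {
		struct atif_sbios_requests req;

		count = amdgpu_atif_get_sbios_requests(atif, &req);
		if (count <= 0)
			return NOTIFY_DONE;
		if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
		    !amdgpu_device_has_dc_support(adev)) {
			/* legacy (non-DC) backlight handling, as before */
		}
	}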
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 079af8ac2636..fa38a960ce00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 			 "Called with userptr BO"))
 		return -EINVAL;
 
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
 
 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
 		pr_err("%s: Failed to reserve BO\n", __func__);
 		goto release_out;
 	}
-	amdgpu_ttm_placement_from_domain(bo, mem->domain);
+	amdgpu_bo_placement_from_domain(bo, mem->domain);
 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (ret)
 		pr_err("%s: failed to validate BO\n", __func__);
@@ -1680,7 +1680,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 
 		if (amdgpu_bo_reserve(bo, true))
 			return -EAGAIN;
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (ret) {
@@ -1824,7 +1824,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		if (mem->user_pages[0]) {
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     mem->user_pages);
-			amdgpu_ttm_placement_from_domain(bo, mem->domain);
+			amdgpu_bo_placement_from_domain(bo, mem->domain);
 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (ret) {
 				pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..178d9ce4eba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	}
 
retry:
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
 	p->bytes_moved += ctx.bytes_moved;
@@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		update_bytes_moved_vis =
 			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 			amdgpu_bo_in_cpu_visible_vram(bo);
-		amdgpu_ttm_placement_from_domain(bo, other);
+		amdgpu_bo_placement_from_domain(bo, other);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		p->bytes_moved += ctx.bytes_moved;
 		if (update_bytes_moved_vis)
@@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		/* Check if we have user pages and nobody bound the BO already */
 		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 		    lobj->user_pages) {
-			amdgpu_ttm_placement_from_domain(bo,
-							 AMDGPU_GEM_DOMAIN_CPU);
+			amdgpu_bo_placement_from_domain(bo,
+							AMDGPU_GEM_DOMAIN_CPU);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (r)
 				return r;
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
@@ -1655,7 +1655,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
 		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
 		if (r)
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-					  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 
 		if (kref_read(&ctx->refcount) == 1)
-			drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-					      &ctx->rings[i].entity);
+			drm_sched_entity_fini(&ctx->rings[i].entity);
 		else
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 386a7b34d2f4..ec53d8f96d06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1926,7 +1926,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 }
 
 /**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
  *
  * @adev: amdgpu_device pointer
  *
@@ -1936,7 +1936,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
  * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		/* displays are handled separately */
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+			/* ungate blocks so that suspend can properly shut them down */
+			if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+				r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+											     AMD_CG_STATE_UNGATE);
+				if (r) {
+					DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+						  adev->ip_blocks[i].version->funcs->name, r);
+				}
+			}
+			/* XXX handle errors */
+			r = adev->ip_blocks[i].version->funcs->suspend(adev);
+			/* XXX handle errors */
+			if (r) {
+				DRM_ERROR("suspend of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+			}
+		}
+	}
+
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, false);
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
 	int i, r;
 
@@ -1957,6 +2005,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
+		/* displays are handled in phase1 */
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+			continue;
 		/* ungate blocks so that suspend can properly shut them down */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1982,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_device_ip_suspend_phase1(adev);
+	if (r)
+		return r;
+	r = amdgpu_device_ip_suspend_phase2(adev);
+
+	return r;
+}
+
 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 {
 	int i, r;
@@ -2004,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 			continue;
 
 		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 		if (r)
 			return r;
 	}
@@ -2039,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 			continue;
 
 		r = block->version->funcs->hw_init(adev);
-		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+		DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 		if (r)
 			return r;
 	}
@@ -2628,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	drm_kms_helper_poll_disable(dev);
 
+	if (fbcon)
+		amdgpu_fbdev_set_suspend(adev, 1);
+
 	if (!amdgpu_device_has_dc_support(adev)) {
 		/* turn off display hw */
 		drm_modeset_lock_all(dev);
@@ -2635,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 		}
 		drm_modeset_unlock_all(dev);
-	}
-
-	amdgpu_amdkfd_suspend(adev);
-
-	/* unpin the front buffers and cursors */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct drm_framebuffer *fb = crtc->primary->fb;
-		struct amdgpu_bo *robj;
-
-		if (amdgpu_crtc->cursor_bo) {
-			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-			r = amdgpu_bo_reserve(aobj, true);
-			if (r == 0) {
-				amdgpu_bo_unpin(aobj);
-				amdgpu_bo_unreserve(aobj);
+		/* unpin the front buffers and cursors */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+			struct drm_framebuffer *fb = crtc->primary->fb;
+			struct amdgpu_bo *robj;
+
+			if (amdgpu_crtc->cursor_bo) {
+				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+				r = amdgpu_bo_reserve(aobj, true);
+				if (r == 0) {
+					amdgpu_bo_unpin(aobj);
+					amdgpu_bo_unreserve(aobj);
+				}
 			}
-		}
 
-		if (fb == NULL || fb->obj[0] == NULL) {
-			continue;
-		}
-		robj = gem_to_amdgpu_bo(fb->obj[0]);
-		/* don't unpin kernel fb objects */
-		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-			r = amdgpu_bo_reserve(robj, true);
-			if (r == 0) {
-				amdgpu_bo_unpin(robj);
-				amdgpu_bo_unreserve(robj);
+			if (fb == NULL || fb->obj[0] == NULL) {
+				continue;
+			}
+			robj = gem_to_amdgpu_bo(fb->obj[0]);
+			/* don't unpin kernel fb objects */
+			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+				r = amdgpu_bo_reserve(robj, true);
+				if (r == 0) {
+					amdgpu_bo_unpin(robj);
+					amdgpu_bo_unreserve(robj);
+				}
 			}
 		}
 	}
+
+	amdgpu_amdkfd_suspend(adev);
+
+	r = amdgpu_device_ip_suspend_phase1(adev);
+
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
 	amdgpu_fence_driver_suspend(adev);
 
-	r = amdgpu_device_ip_suspend(adev);
+	r = amdgpu_device_ip_suspend_phase2(adev);
 
 	/* evict remaining vram memory
 	 * This second call to evict vram is to evict the gart page table
@@ -2691,11 +2770,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		DRM_ERROR("amdgpu asic reset failed\n");
 	}
 
-	if (fbcon) {
-		console_lock();
-		amdgpu_fbdev_set_suspend(adev, 1);
-		console_unlock();
-	}
 	return 0;
 }
 
@@ -2720,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (fbcon)
-		console_lock();
-
 	if (resume) {
 		pci_set_power_state(dev->pdev, PCI_D0);
 		pci_restore_state(dev->pdev);
 		r = pci_enable_device(dev->pdev);
 		if (r)
-			goto unlock;
+			return r;
 	}
 
 	/* post card */
@@ -2741,28 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	r = amdgpu_device_ip_resume(adev);
 	if (r) {
 		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
-		goto unlock;
+		return r;
 	}
 	amdgpu_fence_driver_resume(adev);
 
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
-		goto unlock;
+		return r;
 
-	/* pin cursors */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-		if (amdgpu_crtc->cursor_bo) {
-			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-			r = amdgpu_bo_reserve(aobj, true);
-			if (r == 0) {
-				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-				if (r != 0)
-					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
-				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-				amdgpu_bo_unreserve(aobj);
+	if (!amdgpu_device_has_dc_support(adev)) {
+		/* pin cursors */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+			if (amdgpu_crtc->cursor_bo) {
+				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+				r = amdgpu_bo_reserve(aobj, true);
+				if (r == 0) {
+					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+					if (r != 0)
+						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+					amdgpu_bo_unreserve(aobj);
+				}
 			}
 		}
 	}
@@ -2783,6 +2856,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			}
 			drm_modeset_unlock_all(dev);
 		}
+		amdgpu_fbdev_set_suspend(adev, 0);
 	}
 
 	drm_kms_helper_poll_enable(dev);
@@ -2806,15 +2880,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth--;
 #endif
-
-	if (fbcon)
-		amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
-	if (fbcon)
-		console_unlock();
-
-	return r;
+	return 0;
 }
 
 /**
@@ -3091,7 +3157,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
  * @adev: amdgpu device pointer
 *
 * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
 */
 static int amdgpu_device_reset(struct amdgpu_device *adev)
 {
@@ -3169,7 +3235,7 @@ out:
 * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
 */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 				     bool from_hypervisor)
@@ -3294,7 +3360,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 	} else {
-		dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+		dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
 	}
 
 	amdgpu_vf_error_trans_all(adev);
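Put together, the suspend path in amdgpu_device_suspend() now runs in this order (condensed from the hunks above; error handling elided):

	amdgpu_fbdev_set_suspend(adev, 1);      /* park fbcon first */
	/* turn off displays, unpin front buffers and cursors */
	amdgpu_amdkfd_suspend(adev);
	amdgpu_device_ip_suspend_phase1(adev);  /* DCE display IPs only */
	amdgpu_bo_evict_vram(adev);
	amdgpu_fence_driver_suspend(adev);
	amdgpu_device_ip_suspend_phase2(adev);  /* all remaining IPs */
	amdgpu_bo_evict_vram(adev);             /* second pass: GART page table */

The console_lock()/console_unlock() dance disappears from this file because drm_fb_helper_set_suspend_unlocked() (see the amdgpu_fb.c hunk below) handles console locking internally.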
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index d44b76455e89..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -373,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
 	if (adev->mode_info.rfbdev)
-		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
-					  state);
+		drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+						   state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bcbdcf997d20..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			goto free_pages;
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;
 
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 207f238649b4..c7dce14fd47d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, j, found;
+	int i, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -328,64 +328,61 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		case AMDGPU_HW_IP_GFX:
 			type = AMD_IP_BLOCK_TYPE_GFX;
 			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
 			ib_start_alignment = 32;
 			ib_size_alignment = 32;
 			break;
 		case AMDGPU_HW_IP_COMPUTE:
 			type = AMD_IP_BLOCK_TYPE_GFX;
 			for (i = 0; i < adev->gfx.num_compute_rings; i++)
-				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->gfx.compute_ring[i].ready << i;
 			ib_start_alignment = 32;
 			ib_size_alignment = 32;
 			break;
 		case AMDGPU_HW_IP_DMA:
 			type = AMD_IP_BLOCK_TYPE_SDMA;
 			for (i = 0; i < adev->sdma.num_instances; i++)
-				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
+				ring_mask |= adev->sdma.instance[i].ring.ready << i;
 			ib_start_alignment = 256;
 			ib_size_alignment = 4;
 			break;
 		case AMDGPU_HW_IP_UVD:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+			ring_mask |= adev->uvd.inst[0].ring.ready;
 			ib_start_alignment = 64;
 			ib_size_alignment = 64;
 			break;
 		case AMDGPU_HW_IP_VCE:
 			type = AMD_IP_BLOCK_TYPE_VCE;
 			for (i = 0; i < adev->vce.num_rings; i++)
-				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->vce.ring[i].ready << i;
 			ib_start_alignment = 4;
 			ib_size_alignment = 1;
 			break;
 		case AMDGPU_HW_IP_UVD_ENC:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-				for (j = 0; j < adev->uvd.num_enc_rings; j++)
-					ring_mask |=
-					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
-					(j + i * adev->uvd.num_enc_rings));
+			for (i = 0; i < adev->uvd.num_enc_rings; i++)
+				ring_mask |=
+					adev->uvd.inst[0].ring_enc[i].ready << i;
 			ib_start_alignment = 64;
 			ib_size_alignment = 64;
 			break;
 		case AMDGPU_HW_IP_VCN_DEC:
 			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+			ring_mask = adev->vcn.ring_dec.ready;
 			ib_start_alignment = 16;
 			ib_size_alignment = 16;
 			break;
 		case AMDGPU_HW_IP_VCN_ENC:
 			type = AMD_IP_BLOCK_TYPE_VCN;
 			for (i = 0; i < adev->vcn.num_enc_rings; i++)
-				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+				ring_mask |= adev->vcn.ring_enc[i].ready << i;
 			ib_start_alignment = 64;
 			ib_size_alignment = 1;
 			break;
 		case AMDGPU_HW_IP_VCN_JPEG:
 			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+			ring_mask = adev->vcn.ring_jpeg.ready;
 			ib_start_alignment = 16;
 			ib_size_alignment = 16;
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b12526ce1a9d..21bfa2d8039e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -51,7 +51,7 @@
  *
  */
 
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
 {
 	if (adev->flags & AMD_IS_APU)
 		return false;
@@ -84,12 +84,12 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
 	}
 }
 
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
-	if (WARN_ON_ONCE(bo->pin_count > 0))
+	if (bo->pin_count > 0)
 		amdgpu_bo_subtract_pin_size(bo);
 
 	if (bo->kfd_bo)
@@ -111,7 +111,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 }
 
 /**
- * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
  * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
@@ -120,22 +120,22 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
-	if (bo->destroy == &amdgpu_ttm_bo_destroy)
+	if (bo->destroy == &amdgpu_bo_destroy)
 		return true;
 	return false;
 }
 
 /**
- * amdgpu_ttm_placement_from_domain - set buffer's placement
+ * amdgpu_bo_placement_from_domain - set buffer's placement
  * @abo: &amdgpu_bo buffer object whose placement is to be set
  * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 	struct ttm_placement *placement = &abo->placement;
@@ -216,6 +216,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 		c++;
 	}
 
+	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
 	placement->num_placement = c;
 	placement->placement = places;
 
@@ -488,13 +490,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, bp->domain);
+	amdgpu_bo_placement_from_domain(bo, bp->domain);
 	if (bp->type == ttm_bo_type_kernel)
 		bo->tbo.priority = 1;
 
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 				 &bo->placement, page_align, &ctx, acc_size,
-				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+				 NULL, bp->resv, &amdgpu_bo_destroy);
 	if (unlikely(r != 0))
 		return r;
 
@@ -594,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
 		if (!bp->resv)
 			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
 							NULL));
@@ -682,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 		domain = bo->preferred_domains;
 
retry:
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
@@ -915,7 +917,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_bo_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		unsigned fpfn, lpfn;
 
@@ -1246,7 +1248,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
-	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
 
 	abo = ttm_to_amdgpu_bo(bo);
@@ -1263,7 +1265,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 		return;
 
 	/* move_notify is called before move happens */
-	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 /**
@@ -1285,7 +1287,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	unsigned long offset, size;
 	int r;
 
-	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return 0;
 
 	abo = ttm_to_amdgpu_bo(bo);
@@ -1307,8 +1309,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* hurrah the memory is not visible ! */
 	atomic64_inc(&adev->num_vram_cpu_page_faults);
-	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
-					 AMDGPU_GEM_DOMAIN_GTT);
+	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+					AMDGPU_GEM_DOMAIN_GTT);
 
 	/* Avoid costly evictions; only set GTT as a busy placement */
 	abo->placement.num_busy_placement = 1;
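All the renamed call sites follow the same two-step TTM idiom the helper serves: fill in the BO's placement list for a desired domain, then have TTM validate (and, if needed, migrate) the buffer. A condensed sketch of the pattern, assuming the BO is already reserved (not a complete function):

	struct ttm_operation_ctx ctx = { true, false };
	int r;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;  /* BO could not be placed/moved */

The new BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS) pairs with shrinking the placements[] array in amdgpu_object.h below from AMDGPU_GEM_DOMAIN_MAX + 1 entries to 3.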
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9c3e29a04eb1..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
 #include "amdgpu.h"
 
 #define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS	3
 
 struct amdgpu_bo_param {
 	unsigned long			size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
 	/* Protected by tbo.reserved */
 	u32				preferred_domains;
 	u32				allowed_domains;
-	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
 	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
 		     struct amdgpu_bo_param *bp,
 		     struct amdgpu_bo **bo_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3ed02f472003..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -323,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
 		return ret;
 
 	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index ea9850c9224d..d8357290ad09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 			       u32 ring,
 			       struct amdgpu_ring **out_ring)
 {
-	u32 instance;
-
 	switch (mapper->hw_ip) {
 	case AMDGPU_HW_IP_GFX:
 		*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 		*out_ring = &adev->sdma.instance[ring].ring;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		instance = ring;
-		*out_ring = &adev->uvd.inst[instance].ring;
+		*out_ring = &adev->uvd.inst[0].ring;
 		break;
 	case AMDGPU_HW_IP_VCE:
 		*out_ring = &adev->vce.ring[ring];
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		instance = ring / adev->uvd.num_enc_rings;
-		*out_ring =
-			&adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		*out_ring = &adev->vcn.ring_dec;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76920035eb22..11f262f15200 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -436,7 +436,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
 		      __entry->total_bo, __entry->total_size)
 );
 
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
 	    TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
 	    TP_ARGS(bo, new_placement, old_placement),
 	    TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..8c4358e36c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -248,7 +248,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
248 } 248 }
249 249
250 /* Object isn't an AMDGPU object so ignore */ 250 /* Object isn't an AMDGPU object so ignore */
251 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { 251 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
252 placement->placement = &placements; 252 placement->placement = &placements;
253 placement->busy_placement = &placements; 253 placement->busy_placement = &placements;
254 placement->num_placement = 1; 254 placement->num_placement = 1;
@@ -261,7 +261,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
261 case TTM_PL_VRAM: 261 case TTM_PL_VRAM:
262 if (!adev->mman.buffer_funcs_enabled) { 262 if (!adev->mman.buffer_funcs_enabled) {
263 /* Move to system memory */ 263 /* Move to system memory */
264 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 264 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
265 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 265 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
266 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && 266 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
267 amdgpu_bo_in_cpu_visible_vram(abo)) { 267 amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -271,7 +271,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
271 * BO will be evicted to GTT rather than causing other 271 * BO will be evicted to GTT rather than causing other
272 * BOs to be evicted from VRAM 272 * BOs to be evicted from VRAM
273 */ 273 */
274 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | 274 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
275 AMDGPU_GEM_DOMAIN_GTT); 275 AMDGPU_GEM_DOMAIN_GTT);
276 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; 276 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
277 abo->placements[0].lpfn = 0; 277 abo->placements[0].lpfn = 0;
@@ -279,12 +279,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 			abo->placement.num_busy_placement = 1;
 		} else {
 			/* Move to GTT memory */
-			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 		}
 		break;
 	case TTM_PL_TT:
 	default:
-		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 	}
 	*placement = abo->placement;
 }
@@ -1925,8 +1925,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
+		dma_fence_put(man->move);
+		man->move = NULL;
 	}
 
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
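The amdgpu_ttm.c hunk above tracks the GPU scheduler API cleanup in this pull: drm_sched_entity_destroy() now takes only the entity, and the buffer-funcs teardown drops its cached move fence explicitly. A condensed view of the caller-side change, taken from the hunks in this series (surrounding context elided):

    /* before: the scheduler had to be passed alongside the entity */
    drm_sched_entity_destroy(adev->mman.entity.sched, &adev->mman.entity);

    /* after: the entity's run queue already knows its scheduler */
    drm_sched_entity_destroy(&adev->mman.entity);
    dma_fence_put(man->move);   /* drop the cached move fence... */
    man->move = NULL;           /* ...so no stale fence survives disable */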
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..fca86d71fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
@@ -473,7 +472,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 	if (cmd == 0x0 || cmd == 0x3) {
 		/* yes, force it into VRAM */
 		uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
-		amdgpu_ttm_placement_from_domain(bo, domain);
+		amdgpu_bo_placement_from_domain(bo, domain);
 	}
 	amdgpu_uvd_force_into_uvd_segment(bo);
 
@@ -1014,7 +1013,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (!ring->adev->uvd.address_64_bit) {
 		struct ttm_operation_ctx ctx = { true, false };
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..5d7d7900ccab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		ats_entries = 0;
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
@@ -1113,7 +1113,7 @@ restart:
 		struct amdgpu_ring *ring;
 		struct dma_fence *fence;
 
-		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
 				    sched);
 
 		amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 					   addr, flags);
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	nptes = last - start + 1;
 
@@ -2642,7 +2642,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
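In amdgpu_vm.c the ring is now recovered from the entity's run queue via entity.rq->sched plus container_of(), since the scheduler back-pointer was removed from the entity. A self-contained userspace sketch of the container_of() idiom the driver relies on (toy types, not the kernel structs):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sched { int id; };
    struct ring  { const char *name; struct sched sched; }; /* sched embedded in ring */

    int main(void)
    {
        struct ring r = { "gfx", { 7 } };
        struct sched *s = &r.sched;     /* what entity.rq->sched hands back */
        struct ring *owner = container_of(s, struct ring, sched);

        printf("%s\n", owner->name);    /* prints "gfx" */
        return 0;
    }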
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 702e257a483f..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 			tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
 			WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
 
-			mdelay(100);
+			msleep(100);
 
 			/* linkctl */
 			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
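mdelay() busy-waits, burning the CPU for the full interval, while msleep() schedules the task out; for a 100 ms pause in a non-atomic path like this PCIe gen3 retraining sequence, sleeping is the right tool. A runnable userspace analogy of the trade-off (helper names are illustrative, not kernel APIs):

    #include <time.h>

    static void busy_wait_ms(long ms)       /* analogous to mdelay() */
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {    /* spins, consuming CPU for the whole interval */
            clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
    }

    static void sleeping_wait_ms(long ms)   /* analogous to msleep() */
    {
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);               /* yields the CPU until the timer fires */
    }

    int main(void)
    {
        busy_wait_ms(100);
        sleeping_wait_ms(100);
        return 0;
    }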
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9ab39117cc4e..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3490,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 
 	/* wait for RLC_SAFE_MODE */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
 			break;
 		udelay(1);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 9df94b45d17d..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 			entry->src_id, entry->ring_id, entry->vmid,
 			entry->pasid, task_info.process_name, task_info.tgid,
 			task_info.task_name, task_info.pid);
-		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
+		dev_err(adev->dev, "  at address 0x%016llx from %d\n",
 			addr, entry->client_id);
 		if (!amdgpu_sriov_vf(adev))
 			dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5fc13e71a3b5..45e062022461 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1532,10 +1532,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			DRM_ERROR("DM: Failed to initialize IRQ\n");
 			goto fail;
 		}
-		/*
-		 * Temporary disable until pplib/smu interaction is implemented
-		 */
-		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
 		break;
 #endif
 	default:
@@ -1543,6 +1539,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		goto fail;
 	}
 
+	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
 	return 0;
 fail:
 	kfree(aencoder);
@@ -1574,18 +1573,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
 	/* TODO: implement later */
 }
 
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
-				   u8 level)
-{
-	/* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
-	/* TODO: translate amdgpu_encoder to display_index and call DAL */
-	return 0;
-}
-
 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
 				  struct drm_file *filp)
 {
@@ -1614,10 +1601,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
 static const struct amdgpu_display_funcs dm_display_funcs = {
 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
-	.backlight_set_level =
-		dm_set_backlight_level,/* called unconditionally */
-	.backlight_get_level =
-		dm_get_backlight_level,/* called unconditionally */
+	.backlight_set_level = NULL, /* never called for DC */
+	.backlight_get_level = NULL, /* never called for DC */
 	.hpd_sense = NULL,/* called unconditionally */
 	.hpd_set_polarity = NULL, /* called unconditionally */
 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index c69ae78d82b2..fbe878ae1e8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
 		return false;
 
 	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
-	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
-	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
 
 	return true;
 }
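The * 10 above looks like a unit fix: powerplay reports engine and memory clocks in 10 kHz units while the DC fields are in kHz, so a 1000 MHz clock arrives as 100000 and must become 1000000. A one-line sanity check of that arithmetic (values are illustrative):

    #include <assert.h>

    int main(void)
    {
        unsigned int pp_10khz_units = 100000;   /* 1000 MHz in 10 kHz units */
        unsigned int khz = pp_10khz_units * 10; /* the conversion added above */

        assert(khz == 1000000);                 /* 1000 MHz expressed in kHz */
        return 0;
    }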
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 080f777d705e..bd039322f697 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -676,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
 }
 
 static void hack_bounding_box(struct dcn_bw_internal_vars *v,
-		struct dc_debug *dbg,
+		struct dc_debug_options *dbg,
 		struct dc_state *context)
 {
 	if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a4429c90c60c..388a0635c38d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -760,7 +760,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	 */
 
 	/* deal with non-mst cases */
-	dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+	dp_verify_link_cap(link, &link->reported_link_cap);
 	}
 
 	/* HDMI-DVI Dongle */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 08c9d73b9ab7..4019fe07d291 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,10 +33,8 @@
33#include "include/vector.h" 33#include "include/vector.h"
34#include "core_types.h" 34#include "core_types.h"
35#include "dc_link_ddc.h" 35#include "dc_link_ddc.h"
36#include "i2caux/engine.h" 36#include "engine.h"
37#include "i2caux/i2c_engine.h" 37#include "aux_engine.h"
38#include "i2caux/aux_engine.h"
39#include "i2caux/i2caux.h"
40 38
41#define AUX_POWER_UP_WA_DELAY 500 39#define AUX_POWER_UP_WA_DELAY 500
42#define I2C_OVER_AUX_DEFER_WA_DELAY 70 40#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -641,9 +639,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 	enum aux_transaction_type type,
 	enum i2caux_transaction_action action)
 {
-	struct i2caux *i2caux = ddc->ctx->i2caux;
 	struct ddc *ddc_pin = ddc->ddc_pin;
-	struct aux_engine *engine;
+	struct engine *engine;
+	struct aux_engine *aux_engine;
 	enum aux_channel_operation_result operation_result;
 	struct aux_request_transaction_data aux_req;
 	struct aux_reply_transaction_data aux_rep;
@@ -654,7 +652,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 	memset(&aux_req, 0, sizeof(aux_req));
 	memset(&aux_rep, 0, sizeof(aux_rep));
 
-	engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
+	engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+	aux_engine = engine->funcs->acquire(engine, ddc_pin);
 
 	aux_req.type = type;
 	aux_req.action = action;
@@ -664,15 +663,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 	aux_req.length = size;
 	aux_req.data = buffer;
 
-	engine->funcs->submit_channel_request(engine, &aux_req);
-	operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
+	aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+	operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
 
 	switch (operation_result) {
 	case AUX_CHANNEL_OPERATION_SUCCEEDED:
 		res = returned_bytes;
 
 		if (res <= size && res >= 0)
-			res = engine->funcs->read_channel_reply(engine, size,
+			res = aux_engine->funcs->read_channel_reply(aux_engine, size,
 								buffer, reply,
 								&status);
 
@@ -686,8 +685,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 		res = -1;
 		break;
 	}
-
-	i2caux->funcs->release_engine(i2caux, &engine->base);
+	aux_engine->base.funcs->release_engine(&aux_engine->base);
 	return res;
 }
 
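With the i2caux midlayer gone, dc_link_aux_transfer() reaches the AUX engine directly through the resource pool. The resulting call sequence, condensed from the hunk above (error handling elided):

    engine     = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
    aux_engine = engine->funcs->acquire(engine, ddc_pin);  /* open DDC, arbitrate vs DMCU */

    aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
    operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
    if (operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED)
        res = aux_engine->funcs->read_channel_reply(aux_engine, size,
                                                    buffer, reply, &status);

    aux_engine->base.funcs->release_engine(&aux_engine->base);  /* hand AUX back */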
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 474cd3e01752..9d901ca70588 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1029,7 +1029,7 @@ enum link_training_result dc_link_dp_perform_link_training(
 				lt_settings.lane_settings[0].PRE_EMPHASIS);
 
 	if (status != LINK_TRAINING_SUCCESS)
-		link->ctx->dc->debug.debug_data.ltFailCount++;
+		link->ctx->dc->debug_data.ltFailCount++;
 
 	return status;
 }
@@ -1086,7 +1086,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 	return max_link_cap;
 }
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
 	struct dc_link *link,
 	struct dc_link_settings *known_limit_link_setting)
 {
@@ -1101,6 +1101,11 @@ bool dp_hbr_verify_link_cap(
 	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
 	enum link_training_result status;
 
+	if (link->dc->debug.skip_detection_link_training) {
+		link->verified_link_cap = *known_limit_link_setting;
+		return true;
+	}
+
 	success = false;
 	skip_link_training = false;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index ceb4c3725893..7515c0dcbdd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
38#include "inc/compressor.h" 38#include "inc/compressor.h"
39#include "dml/display_mode_lib.h" 39#include "dml/display_mode_lib.h"
40 40
41#define DC_VER "3.1.56" 41#define DC_VER "3.1.58"
42 42
43#define MAX_SURFACES 3 43#define MAX_SURFACES 3
44#define MAX_STREAMS 6 44#define MAX_STREAMS 6
@@ -207,7 +207,7 @@ struct dc_clocks {
 	int phyclk_khz;
 };
 
-struct dc_debug {
+struct dc_debug_options {
 	enum visual_confirm visual_confirm;
 	bool sanity_checks;
 	bool max_disp_clk;
@@ -258,13 +258,16 @@ struct dc_debug {
 	bool avoid_vbios_exec_table;
 	bool scl_reset_length10;
 	bool hdmi20_disable;
+	bool skip_detection_link_training;
+};
 
-	struct {
-		uint32_t ltFailCount;
-		uint32_t i2cErrorCount;
-		uint32_t auxErrorCount;
-	} debug_data;
+struct dc_debug_data {
+	uint32_t ltFailCount;
+	uint32_t i2cErrorCount;
+	uint32_t auxErrorCount;
 };
+
+
 struct dc_state;
 struct resource_pool;
 struct dce_hwseq;
@@ -273,8 +276,7 @@ struct dc {
 	struct dc_caps caps;
 	struct dc_cap_funcs cap_funcs;
 	struct dc_config config;
-	struct dc_debug debug;
-
+	struct dc_debug_options debug;
 	struct dc_context *ctx;
 
 	uint8_t link_count;
@@ -310,6 +312,8 @@ struct dc {
 
 	/* FBC compressor */
 	struct compressor *fbc_compressor;
+
+	struct dc_debug_data debug_data;
 };
 
 enum frame_buffer_mode {
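The dc.h hunks split the old nested counters out of struct dc_debug: read-mostly knobs stay in dc_debug_options while the runtime counters move to a standalone dc_debug_data hanging directly off struct dc. A compilable miniature of the resulting layout (toy fields only):

    #include <stdint.h>

    struct debug_options_demo { int visual_confirm; };  /* knobs, set at init */
    struct debug_data_demo    { uint32_t ltFailCount; };/* counters, bumped at runtime */

    struct dc_demo {
        struct debug_options_demo debug;
        struct debug_data_demo    debug_data;
    };

    int main(void)
    {
        struct dc_demo dc = {0};

        dc.debug_data.ltFailCount++;  /* was dc.debug.debug_data.ltFailCount++ */
        return (int)dc.debug_data.ltFailCount - 1;
    }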
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9cfd7ea845e3..1d1f2d5ece51 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,7 +192,7 @@ enum surface_pixel_format {
 	/*swapped & float*/
 	SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
 	/*grow graphics here if necessary */
-
+	SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
 	SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
 	SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
 		SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd8e535..825537bd4545 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
 
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644
index 000000000000..b28e2120767e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+	aux110->base.base.ctx
+#define REG(reg_name)\
+	(aux110->regs->reg_name)
+
+#define DC_LOGGER \
+	engine->base.ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+	container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+	FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+	container_of((ptr), struct aux_engine, base)
+enum {
+	AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+	AUX_TIMED_OUT_RETRY_COUNTER = 2,
+	AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+	struct engine *engine)
+{
+	struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+
+	dal_ddc_close(engine->ddc);
+
+	engine->ddc = NULL;
+
+	REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+	struct aux_engine *engine)
+{
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+	uint32_t value = REG_READ(AUX_ARB_CONTROL);
+	uint32_t field = get_reg_field_value(
+			value,
+			AUX_ARB_CONTROL,
+			AUX_REG_RW_CNTL_STATUS);
+
+	return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+	struct aux_engine *engine)
+{
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+	uint32_t value = REG_READ(AUX_ARB_CONTROL);
+	uint32_t field = get_reg_field_value(
+			value,
+			AUX_ARB_CONTROL,
+			AUX_REG_RW_CNTL_STATUS);
+	if (field == DMCU_CAN_ACCESS_AUX)
+		return false;
+	/* enable AUX before requesting SW access to AUX */
+	value = REG_READ(AUX_CONTROL);
+	field = get_reg_field_value(value,
+			AUX_CONTROL,
+			AUX_EN);
+
+	if (field == 0) {
+		set_reg_field_value(
+				value,
+				1,
+				AUX_CONTROL,
+				AUX_EN);
+
+		if (REG(AUX_RESET_MASK)) {
+			/* reset the DP_AUX block as part of the enable sequence */
+			set_reg_field_value(
+				value,
+				1,
+				AUX_CONTROL,
+				AUX_RESET);
+		}
+
+		REG_WRITE(AUX_CONTROL, value);
+
+		if (REG(AUX_RESET_MASK)) {
+			/* poll HW to make sure the reset is done */
+
+			REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+					1, 11);
+
+			set_reg_field_value(
+				value,
+				0,
+				AUX_CONTROL,
+				AUX_RESET);
+
+			REG_WRITE(AUX_CONTROL, value);
+
+			REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+					1, 11);
+		}
+	} /*if (field)*/
+
+	/* request SW to access AUX */
+	REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+	value = REG_READ(AUX_ARB_CONTROL);
+	field = get_reg_field_value(
+			value,
+			AUX_ARB_CONTROL,
+			AUX_REG_RW_CNTL_STATUS);
+
+	return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+	((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+	((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+	(0xFF & (address))
+
+static void submit_channel_request(
+	struct aux_engine *engine,
+	struct aux_request_transaction_data *request)
+{
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+	uint32_t value;
+	uint32_t length;
+
+	bool is_write =
+		((request->type == AUX_TRANSACTION_TYPE_DP) &&
+		 (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+		((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+		 ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+		  (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+	if (REG(AUXN_IMPCAL)) {
+		/* clear_aux_error */
+		REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+				1,
+				0);
+
+		REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+				1,
+				0);
+
+		/* force_default_calibrate */
+		REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+				AUXN_IMPCAL_ENABLE, 1,
+				AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+		/* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP toggles OVERRIDE? */
+
+		REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+				1,
+				0);
+	}
+	/* set the delay and the number of bytes to write */
+
+	/* The length includes
+	 * the 4-bit header and the 20-bit address
+	 * (that is, 3 bytes).
+	 * If the requested length is non-zero,
+	 * an additional byte specifying the length is required.
+	 */
+
+	length = request->length ? 4 : 3;
+	if (is_write)
+		length += request->length;
+
+	REG_UPDATE_2(AUX_SW_CONTROL,
+			AUX_SW_START_DELAY, request->delay,
+			AUX_SW_WR_BYTES, length);
+
+	/* program action and address and payload data (if 'is_write') */
+	value = REG_UPDATE_4(AUX_SW_DATA,
+			AUX_SW_INDEX, 0,
+			AUX_SW_DATA_RW, 0,
+			AUX_SW_AUTOINCREMENT_DISABLE, 1,
+			AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+	value = REG_SET_2(AUX_SW_DATA, value,
+			AUX_SW_AUTOINCREMENT_DISABLE, 0,
+			AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+	value = REG_SET(AUX_SW_DATA, value,
+			AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+	if (request->length) {
+		value = REG_SET(AUX_SW_DATA, value,
+				AUX_SW_DATA, request->length - 1);
+	}
+
+	if (is_write) {
+		/* Load the HW buffer with the data to be sent.
+		 * This is relevant for write operation.
+		 * For read, the received data will be
+		 * processed in process_channel_reply().
+		 */
+		uint32_t i = 0;
+
+		while (i < request->length) {
+			value = REG_SET(AUX_SW_DATA, value,
+					AUX_SW_DATA, request->data[i]);
+
+			++i;
+		}
+	}
+
+	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+	REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+				10, aux110->timeout_period/10);
+	REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+			      uint8_t *buffer, uint8_t *reply_result,
+			      uint32_t *sw_status)
+{
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+	uint32_t bytes_replied;
+	uint32_t reply_result_32;
+
+	*sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+			     &bytes_replied);
+
+	/* In case HPD is LOW, exit AUX transaction */
+	if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+		return -1;
+
+	/* Need at least the status byte */
+	if (!bytes_replied)
+		return -1;
+
+	REG_UPDATE_1BY1_3(AUX_SW_DATA,
+			  AUX_SW_INDEX, 0,
+			  AUX_SW_AUTOINCREMENT_DISABLE, 1,
+			  AUX_SW_DATA_RW, 1);
+
+	REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+	reply_result_32 = reply_result_32 >> 4;
+	*reply_result = (uint8_t)reply_result_32;
+
+	if (reply_result_32 == 0) { /* ACK */
+		uint32_t i = 0;
+
+		/* First byte was already used to get the command status */
+		--bytes_replied;
+
+		/* Do not overflow buffer */
+		if (bytes_replied > size)
+			return -1;
+
+		while (i < bytes_replied) {
+			uint32_t aux_sw_data_val;
+
+			REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+			buffer[i] = aux_sw_data_val;
+			++i;
+		}
+
+		return i;
+	}
+
+	return 0;
+}
+
+static void process_channel_reply(
+	struct aux_engine *engine,
+	struct aux_reply_transaction_data *reply)
+{
+	int bytes_replied;
+	uint8_t reply_result;
+	uint32_t sw_status;
+
+	bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+					   &reply_result, &sw_status);
+
+	/* in case HPD is LOW, exit AUX transaction */
+	if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+		reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+		return;
+	}
+
+	if (bytes_replied < 0) {
+		/* Need to handle an error case...
+		 * Hopefully, the upper-layer function won't call this function
+		 * if the number of bytes in the reply was 0, because there was
+		 * surely an error that was asserted and should have been
+		 * handled for the hot-plug case; this can still happen
+		 */
+		if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+			reply->status = AUX_TRANSACTION_REPLY_INVALID;
+			ASSERT_CRITICAL(false);
+			return;
+		}
+	} else {
+
+		switch (reply_result) {
+		case 0: /* ACK */
+			reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+			break;
+		case 1: /* NACK */
+			reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+			break;
+		case 2: /* DEFER */
+			reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+			break;
+		case 4: /* AUX ACK / I2C NACK */
+			reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+			break;
+		case 8: /* AUX ACK / I2C DEFER */
+			reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+			break;
+		default:
+			reply->status = AUX_TRANSACTION_REPLY_INVALID;
+		}
+	}
+}
+
+static enum aux_channel_operation_result get_channel_status(
+	struct aux_engine *engine,
+	uint8_t *returned_bytes)
+{
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+	uint32_t value;
+
+	if (returned_bytes == NULL) {
+		/* caller passed a NULL pointer */
+		ASSERT_CRITICAL(false);
+		return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+	}
+	*returned_bytes = 0;
+
+	/* poll to make sure that SW_DONE is asserted */
+	value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+				10, aux110->timeout_period/10);
+
+	/* in case HPD is LOW, exit AUX transaction */
+	if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+		return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+	/* Note that the following bits are set in 'status.bits'
+	 * during CTS 4.2.1.2 (FW 3.3.1):
+	 * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+	 * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+	 *
+	 * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+	 * HW debugging bit and should be ignored.
+	 */
+	if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+		if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+			(value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+			return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+		else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+			(value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+			(value &
+				AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+			(value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+			return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+		*returned_bytes = get_reg_field_value(value,
+				AUX_SW_STATUS,
+				AUX_SW_REPLY_BYTE_COUNT);
+
+		if (*returned_bytes == 0)
+			return
+			AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+		else {
+			*returned_bytes -= 1;
+			return AUX_CHANNEL_OPERATION_SUCCEEDED;
+		}
+	} else {
+		/* time_elapsed >= aux_engine->timeout_period
+		 * AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point
+		 */
+		ASSERT_CRITICAL(false);
+		return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+	}
+}
+static void process_read_reply(
+	struct aux_engine *engine,
+	struct read_command_context *ctx)
+{
+	engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+	switch (ctx->reply.status) {
+	case AUX_TRANSACTION_REPLY_AUX_ACK:
+		ctx->defer_retry_aux = 0;
+		if (ctx->returned_byte > ctx->current_read_length) {
+			ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+			ctx->operation_succeeded = false;
+		} else if (ctx->returned_byte < ctx->current_read_length) {
+			ctx->current_read_length -= ctx->returned_byte;
+
+			ctx->offset += ctx->returned_byte;
+
+			++ctx->invalid_reply_retry_aux_on_ack;
+
+			if (ctx->invalid_reply_retry_aux_on_ack >
+				AUX_INVALID_REPLY_RETRY_COUNTER) {
+				ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+				ctx->operation_succeeded = false;
+			}
+		} else {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+			ctx->transaction_complete = true;
+			ctx->operation_succeeded = true;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_AUX_NACK:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+		ctx->operation_succeeded = false;
+		break;
+	case AUX_TRANSACTION_REPLY_AUX_DEFER:
+		++ctx->defer_retry_aux;
+
+		if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_I2C_DEFER:
+		ctx->defer_retry_aux = 0;
+
+		++ctx->defer_retry_i2c;
+
+		if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_HPD_DISCON:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+		ctx->operation_succeeded = false;
+		break;
+	default:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+		ctx->operation_succeeded = false;
+	}
+}
+static void process_read_request(
+	struct aux_engine *engine,
+	struct read_command_context *ctx)
+{
+	enum aux_channel_operation_result operation_result;
+
+	engine->funcs->submit_channel_request(engine, &ctx->request);
+
+	operation_result = engine->funcs->get_channel_status(
+			engine, &ctx->returned_byte);
+
+	switch (operation_result) {
+	case AUX_CHANNEL_OPERATION_SUCCEEDED:
+		if (ctx->returned_byte > ctx->current_read_length) {
+			ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+			ctx->operation_succeeded = false;
+		} else {
+			ctx->timed_out_retry_aux = 0;
+			ctx->invalid_reply_retry_aux = 0;
+
+			ctx->reply.length = ctx->returned_byte;
+			ctx->reply.data = ctx->buffer;
+
+			process_read_reply(engine, ctx);
+		}
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+		++ctx->invalid_reply_retry_aux;
+
+		if (ctx->invalid_reply_retry_aux >
+			AUX_INVALID_REPLY_RETRY_COUNTER) {
+			ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+			ctx->operation_succeeded = false;
+		} else
+			udelay(400);
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+		++ctx->timed_out_retry_aux;
+
+		if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		} else {
+			/* DP 1.2a, table 2-58:
+			 * "S3: AUX Request CMD PENDING:
+			 * retry 3 times, with 400usec wait on each"
+			 * The HW timeout is set to 550usec,
+			 * so we should not wait here
+			 */
+		}
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+		ctx->operation_succeeded = false;
+		break;
+	default:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+		ctx->operation_succeeded = false;
+	}
+}
+static bool read_command(
+	struct aux_engine *engine,
+	struct i2caux_transaction_request *request,
+	bool middle_of_transaction)
+{
+	struct read_command_context ctx;
+
+	ctx.buffer = request->payload.data;
+	ctx.current_read_length = request->payload.length;
+	ctx.offset = 0;
+	ctx.timed_out_retry_aux = 0;
+	ctx.invalid_reply_retry_aux = 0;
+	ctx.defer_retry_aux = 0;
+	ctx.defer_retry_i2c = 0;
+	ctx.invalid_reply_retry_aux_on_ack = 0;
+	ctx.transaction_complete = false;
+	ctx.operation_succeeded = true;
+
+	if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+		ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+		ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+		ctx.request.address = request->payload.address;
+	} else if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+		ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+		ctx.request.action = middle_of_transaction ?
+			I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+			I2CAUX_TRANSACTION_ACTION_I2C_READ;
+		ctx.request.address = request->payload.address >> 1;
+	} else {
+		/* in DAL2, there was no return in such a case */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	ctx.request.delay = 0;
+
+	do {
+		memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+		ctx.request.data = ctx.buffer + ctx.offset;
+		ctx.request.length = ctx.current_read_length;
+
+		process_read_request(engine, &ctx);
+
+		request->status = ctx.status;
+
+		if (ctx.operation_succeeded && !ctx.transaction_complete)
+			if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+				msleep(engine->delay);
+	} while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+	if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+		DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+				request->payload.address,
+				request->payload.data[0],
+				ctx.operation_succeeded);
+	}
+
+	return ctx.operation_succeeded;
+}
+
+static void process_write_reply(
+	struct aux_engine *engine,
+	struct write_command_context *ctx)
+{
+	engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+	switch (ctx->reply.status) {
+	case AUX_TRANSACTION_REPLY_AUX_ACK:
+		ctx->operation_succeeded = true;
+
+		if (ctx->returned_byte) {
+			ctx->request.action = ctx->mot ?
+			I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+			I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+			ctx->current_write_length = 0;
+
+			++ctx->ack_m_retry;
+
+			if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+				ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+				ctx->operation_succeeded = false;
+			} else
+				udelay(300);
+		} else {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+			ctx->defer_retry_aux = 0;
+			ctx->ack_m_retry = 0;
+			ctx->transaction_complete = true;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_AUX_NACK:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+		ctx->operation_succeeded = false;
+		break;
+	case AUX_TRANSACTION_REPLY_AUX_DEFER:
+		++ctx->defer_retry_aux;
+
+		if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_I2C_DEFER:
+		ctx->defer_retry_aux = 0;
+		ctx->current_write_length = 0;
+
+		ctx->request.action = ctx->mot ?
+			I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+			I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+		++ctx->defer_retry_i2c;
+
+		if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		}
+		break;
+	case AUX_TRANSACTION_REPLY_HPD_DISCON:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+		ctx->operation_succeeded = false;
+		break;
+	default:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+		ctx->operation_succeeded = false;
+	}
+}
+static void process_write_request(
+	struct aux_engine *engine,
+	struct write_command_context *ctx)
+{
+	enum aux_channel_operation_result operation_result;
+
+	engine->funcs->submit_channel_request(engine, &ctx->request);
+
+	operation_result = engine->funcs->get_channel_status(
+			engine, &ctx->returned_byte);
+
+	switch (operation_result) {
+	case AUX_CHANNEL_OPERATION_SUCCEEDED:
+		ctx->timed_out_retry_aux = 0;
+		ctx->invalid_reply_retry_aux = 0;
+
+		ctx->reply.length = ctx->returned_byte;
+		ctx->reply.data = ctx->reply_data;
+
+		process_write_reply(engine, ctx);
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+		++ctx->invalid_reply_retry_aux;
+
+		if (ctx->invalid_reply_retry_aux >
+			AUX_INVALID_REPLY_RETRY_COUNTER) {
+			ctx->status =
+				I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+			ctx->operation_succeeded = false;
+		} else
+			udelay(400);
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+		++ctx->timed_out_retry_aux;
+
+		if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+			ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+			ctx->operation_succeeded = false;
+		} else {
+			/* DP 1.2a, table 2-58:
+			 * "S3: AUX Request CMD PENDING:
+			 * retry 3 times, with 400usec wait on each"
+			 * The HW timeout is set to 550usec,
+			 * so we should not wait here
+			 */
+		}
+		break;
+	case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+		ctx->operation_succeeded = false;
+		break;
+	default:
+		ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+		ctx->operation_succeeded = false;
+	}
+}
+static bool write_command(
+	struct aux_engine *engine,
+	struct i2caux_transaction_request *request,
+	bool middle_of_transaction)
+{
+	struct write_command_context ctx;
+
+	ctx.mot = middle_of_transaction;
+	ctx.buffer = request->payload.data;
+	ctx.current_write_length = request->payload.length;
+	ctx.timed_out_retry_aux = 0;
+	ctx.invalid_reply_retry_aux = 0;
+	ctx.defer_retry_aux = 0;
+	ctx.defer_retry_i2c = 0;
+	ctx.ack_m_retry = 0;
+	ctx.transaction_complete = false;
+	ctx.operation_succeeded = true;
+
+	if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+		ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+		ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+		ctx.request.address = request->payload.address;
+	} else if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+		ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+		ctx.request.action = middle_of_transaction ?
+			I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+			I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+		ctx.request.address = request->payload.address >> 1;
+	} else {
+		/* in DAL2, there was no return in such a case */
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	ctx.request.delay = 0;
+
+	ctx.max_defer_retry =
+		(engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+			engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+	do {
+		ctx.request.data = ctx.buffer;
+		ctx.request.length = ctx.current_write_length;
+
+		process_write_request(engine, &ctx);
+
+		request->status = ctx.status;
+
+		if (ctx.operation_succeeded && !ctx.transaction_complete)
+			if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+				msleep(engine->delay);
+	} while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+	if (request->payload.address_space ==
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+		DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+				request->payload.address,
+				request->payload.data[0],
+				ctx.operation_succeeded);
+	}
+
+	return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+	struct aux_engine *engine,
+	struct i2caux_transaction_request *request)
+{
+	struct i2caux_transaction_request dummy_request;
+	uint8_t dummy_data;
+
+	/* [tcheng] We only need to send the stop (read with MOT = 0)
+	 * for I2C-over-AUX, not native AUX
+	 */
+
+	if (request->payload.address_space !=
+		I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+		return false;
+
+	dummy_request.operation = request->operation;
+	dummy_request.payload.address_space = request->payload.address_space;
+	dummy_request.payload.address = request->payload.address;
+
+	/*
+	 * Add a dummy byte due to some receiver quirk
+	 * where one byte is sent along with MOT = 0.
+	 * Ideally this should be 0.
+	 */
+
+	dummy_request.payload.length = 0;
+	dummy_request.payload.data = &dummy_data;
+
+	if (request->operation == I2CAUX_TRANSACTION_READ)
+		return read_command(engine, &dummy_request, false);
+	else
+		return write_command(engine, &dummy_request, false);
+
+	/* according to Syed, DoDummyMOT is not needed anymore */
+}
+bool submit_request(
+	struct engine *engine,
+	struct i2caux_transaction_request *request,
+	bool middle_of_transaction)
+{
+	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
+
+	bool result;
+	bool mot_used = true;
+
+	switch (request->operation) {
+	case I2CAUX_TRANSACTION_READ:
+		result = read_command(aux_engine, request, mot_used);
+		break;
+	case I2CAUX_TRANSACTION_WRITE:
+		result = write_command(aux_engine, request, mot_used);
+		break;
+	default:
+		result = false;
+	}
+
+	/* [tcheng]
+	 * need to send a stop for the last transaction to free up the AUX;
+	 * if the above command fails, this would be the last transaction
+	 */
+
+	if (!middle_of_transaction || !result)
+		end_of_transaction_command(aux_engine, request);
+
+	/* mask AUX interrupt */
+
+	return result;
+}
+enum i2caux_engine_type get_engine_type(
+	const struct engine *engine)
+{
+	return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static struct aux_engine *acquire(
+	struct engine *engine,
+	struct ddc *ddc)
+{
+	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
+	enum gpio_result result;
+
+	if (aux_engine->funcs->is_engine_available) {
+		/* check whether SW could use the engine */
+		if (!aux_engine->funcs->is_engine_available(aux_engine))
+			return NULL;
+	}
+
+	result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+			      GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+	if (result != GPIO_RESULT_OK)
+		return NULL;
+
+	if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+		dal_ddc_close(ddc);
+		return NULL;
+	}
+
+	engine->ddc = ddc;
+
+	return aux_engine;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+	.acquire_engine = acquire_engine,
+	.submit_channel_request = submit_channel_request,
+	.process_channel_reply = process_channel_reply,
+	.read_channel_reply = read_channel_reply,
+	.get_channel_status = get_channel_status,
+	.is_engine_available = is_engine_available,
+};
+
+static const struct engine_funcs engine_funcs = {
+	.release_engine = release_engine,
+	.destroy_engine = dce110_engine_destroy,
+	.submit_request = submit_request,
+	.get_engine_type = get_engine_type,
+	.acquire = acquire,
+};
+
+void dce110_engine_destroy(struct engine **engine)
+{
+
+	struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
+
+	kfree(engine110);
+	*engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+		struct dc_context *ctx,
+		uint32_t inst,
+		uint32_t timeout_period,
+		const struct dce110_aux_registers *regs)
+{
+	aux_engine110->base.base.ddc = NULL;
+	aux_engine110->base.base.ctx = ctx;
+	aux_engine110->base.delay = 0;
+	aux_engine110->base.max_defer_write_retry = 0;
+	aux_engine110->base.base.funcs = &engine_funcs;
+	aux_engine110->base.funcs = &aux_engine_funcs;
+	aux_engine110->base.base.inst = inst;
+	aux_engine110->timeout_period = timeout_period;
+	aux_engine110->regs = regs;
+
+	return &aux_engine110->base;
+}
+
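The read path in dce_aux.c retries according to the budgets declared at the top of the file: one retry on an invalid reply, two on a timeout, six on a defer, looping until the transaction completes or a budget is exhausted. A self-contained toy reproducing that ++count > LIMIT retry pattern (the transfer outcome is simulated):

    #include <stdbool.h>
    #include <stdio.h>

    enum outcome { OK, INVALID_REPLY, TIMED_OUT };

    enum {  /* mirrors AUX_*_RETRY_COUNTER in dce_aux.c */
        INVALID_RETRIES = 1,
        TIMEOUT_RETRIES = 2
    };

    static enum outcome fake_transfer(int attempt)
    {
        return attempt < 2 ? TIMED_OUT : OK;  /* time out twice, then succeed */
    }

    int main(void)
    {
        int invalid = 0, timed_out = 0, attempt = 0;
        bool done = false, ok = true;

        while (ok && !done) {
            switch (fake_transfer(attempt++)) {
            case OK:
                done = true;
                break;
            case INVALID_REPLY:  /* give up once the budget is exhausted */
                ok = (++invalid <= INVALID_RETRIES);
                break;
            case TIMED_OUT:
                ok = (++timed_out <= TIMEOUT_RETRIES);
                break;
            }
        }
        printf("%s after %d attempt(s)\n", done ? "succeeded" : "gave up", attempt);
        return done ? 0 : 1;
    }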
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644
index 000000000000..c6b2aec2e367
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+	SRI(AUX_CONTROL, DP_AUX, id), \
+	SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+	SRI(AUX_SW_DATA, DP_AUX, id), \
+	SRI(AUX_SW_CONTROL, DP_AUX, id), \
+	SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+	SRI(AUX_SW_STATUS, DP_AUX, id), \
+	SR(AUXN_IMPCAL), \
+	SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+	uint32_t AUX_CONTROL;
+	uint32_t AUX_ARB_CONTROL;
+	uint32_t AUX_SW_DATA;
+	uint32_t AUX_SW_CONTROL;
+	uint32_t AUX_INTERRUPT_CONTROL;
+	uint32_t AUX_SW_STATUS;
+	uint32_t AUXN_IMPCAL;
+	uint32_t AUXP_IMPCAL;
+
+	uint32_t AUX_RESET_MASK;
+};
+
+enum {	/* This is the timeout as defined in DP 1.2a,
+	 * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+	 */
+	AUX_TIMEOUT_PERIOD = 400,
+
+	/* Ideally, the SW timeout should be just above 550usec
+	 * which is programmed in HW.
+	 * But the SW timeout of 600usec is not reliable,
+	 * because on some systems, delay_in_microseconds()
+	 * returns faster than it should.
+	 * EPR #379763: by trial-and-error on different systems,
+	 * 700usec is the minimum reliable SW timeout for polling
+	 * the AUX_SW_STATUS.AUX_SW_DONE bit.
+	 * This timeout expires *only* when there is
+	 * AUX Error or AUX Timeout conditions - not during normal operation.
+	 * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+	 * at most within ~240usec. That means,
+	 * increasing this timeout will not affect normal operation,
+	 * and we'll timeout after
+	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+	 * This timeout is especially important for
+	 * resume from S3 and CTS.
+	 */
+	SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+struct aux_engine_dce110 {
+	struct aux_engine base;
+	const struct dce110_aux_registers *regs;
+	struct {
+		uint32_t aux_control;
+		uint32_t aux_arb_control;
+		uint32_t aux_sw_data;
+		uint32_t aux_sw_control;
+		uint32_t aux_interrupt_control;
+		uint32_t aux_sw_status;
+	} addr;
+	uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+	uint32_t engine_id;
+	uint32_t timeout_period;
+	struct dc_context *ctx;
+	const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+		struct aux_engine_dce110 *aux_engine110,
+		struct dc_context *ctx,
+		uint32_t inst,
+		uint32_t timeout_period,
+		const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct engine **engine);
+
+bool dce110_aux_engine_acquire(
+	struct engine *aux_engine,
+	struct ddc *ddc);
+#endif
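The timeout comment in the header works out as follows: AUX_TIMEOUT_PERIOD = 400 usec is the DP 1.2a per-transaction budget, and with SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 the resource files below program the engine with 4 * 400 = 1600 usec. That sits comfortably above both the 550 usec HW timeout and the ~700 usec minimum reliable SW poll found in EPR #379763, while normal transactions finish within roughly 240 usec, so the larger budget only matters in the error paths.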
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8f8a2abac3f3..0db8d1da3d0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -337,7 +337,7 @@ static int dce112_set_clock(
 
 static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
 {
-	struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+	struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
 	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
 	struct integrated_info info = { { { 0 } } };
 	struct dc_firmware_info fw_info = { { 0 } };
@@ -824,7 +824,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
 #ifdef CONFIG_X86
 struct dccg *dcn1_dccg_create(struct dc_context *ctx)
 {
-	struct dc_debug *debug = &ctx->dc->debug;
+	struct dc_debug_options *debug = &ctx->dc->debug;
 	struct dc_bios *bp = ctx->dc_bios;
 	struct dc_firmware_info fw_info = { { 0 } };
 	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index ec3221333011..74c05e878807 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
 			max_pix_clk =
 				pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
 	}
-
-	if (max_pix_clk == 0)
-		ASSERT(0);
-
 	return max_pix_clk;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ed8eace42be..c34c9531915e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -52,6 +52,7 @@
52#include "dce/dce_10_0_sh_mask.h" 52#include "dce/dce_10_0_sh_mask.h"
53 53
54#include "dce/dce_dmcu.h" 54#include "dce/dce_dmcu.h"
55#include "dce/dce_aux.h"
55#include "dce/dce_abm.h" 56#include "dce/dce_abm.h"
56 57
57#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT 58#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
279static const struct dce_opp_mask opp_mask = { 280static const struct dce_opp_mask opp_mask = {
280 OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) 281 OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
281}; 282};
283#define aux_engine_regs(id)\
284[id] = {\
285 AUX_COMMON_REG_LIST(id), \
286 .AUX_RESET_MASK = 0 \
287}
282 288
289static const struct dce110_aux_registers aux_engine_regs[] = {
290 aux_engine_regs(0),
291 aux_engine_regs(1),
292 aux_engine_regs(2),
293 aux_engine_regs(3),
294 aux_engine_regs(4),
295 aux_engine_regs(5)
296};
283 297
284#define audio_regs(id)\ 298#define audio_regs(id)\
285[id] = {\ 299[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
572 return &opp->base; 586 return &opp->base;
573} 587}
574 588
589struct engine *dce100_aux_engine_create(
590 struct dc_context *ctx,
591 uint32_t inst)
592{
593 struct aux_engine_dce110 *aux_engine =
594 kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
595
596 if (!aux_engine)
597 return NULL;
598
599 dce110_aux_engine_construct(aux_engine, ctx, inst,
600 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
601 &aux_engine_regs[inst]);
602
603 return &aux_engine->base.base;
604}
605
575struct clock_source *dce100_clock_source_create( 606struct clock_source *dce100_clock_source_create(
576 struct dc_context *ctx, 607 struct dc_context *ctx,
577 struct dc_bios *bios, 608 struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
624 kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); 655 kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
625 pool->base.timing_generators[i] = NULL; 656 pool->base.timing_generators[i] = NULL;
626 } 657 }
658
659 if (pool->base.engines[i] != NULL)
660 dce110_engine_destroy(&pool->base.engines[i]);
661
627 } 662 }
628 663
629 for (i = 0; i < pool->base.stream_enc_count; i++) { 664 for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
678 struct dc *dc, 713 struct dc *dc,
679 struct dc_state *context) 714 struct dc_state *context)
680{ 715{
681 /* TODO implement when needed but for now hardcode max value*/ 716 int i;
682 context->bw.dce.dispclk_khz = 681000; 717 bool at_least_one_pipe = false;
683 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; 718
719 for (i = 0; i < dc->res_pool->pipe_count; i++) {
720 if (context->res_ctx.pipe_ctx[i].stream)
721 at_least_one_pipe = true;
722 }
723
724 if (at_least_one_pipe) {
725 /* TODO implement when needed but for now hardcode max value*/
726 context->bw.dce.dispclk_khz = 681000;
727 context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
728 } else {
729 context->bw.dce.dispclk_khz = 0;
730 context->bw.dce.yclk_khz = 0;
731 }
684 732
685 return true; 733 return true;
686} 734}
@@ -915,6 +963,13 @@ static bool construct(
915 "DC: failed to create output pixel processor!\n"); 963 "DC: failed to create output pixel processor!\n");
916 goto res_create_fail; 964 goto res_create_fail;
917 } 965 }
966 pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
967 if (pool->base.engines[i] == NULL) {
968 BREAK_TO_DEBUGGER();
969 dm_error(
970 "DC:failed to create aux engine!!\n");
971 goto res_create_fail;
972 }
918 } 973 }
919 974
920 dc->caps.max_planes = pool->base.pipe_count; 975 dc->caps.max_planes = pool->base.pipe_count;
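
The dce100 hunks above establish the pattern that the dce110/dce112/dce120/dce80/dcn10 hunks below repeat: a per-instance register table stamped out with AUX_COMMON_REG_LIST(), a *_aux_engine_create() helper, a guarded destroy in destruct(), and a create-or-fail step in the construct() pipe loop. A condensed lifecycle sketch, using only names introduced by this series (error paths abbreviated):

	/* construct(): one AUX engine per pipe; the timeout passed to
	 * dce110_aux_engine_construct() is SW_AUX_TIMEOUT_PERIOD_MULTIPLIER
	 * (4) * AUX_TIMEOUT_PERIOD, the S3/CTS padding called out in
	 * dce_aux.h. */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL)
			goto res_create_fail;	/* unwound via destruct() */
	}

	/* destruct(): guarded teardown of whatever was created */
	for (i = 0; i < pool->base.pipe_count; i++)
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
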
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 1c902e49a712..4a665a29191b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -49,6 +49,7 @@
 #include "dce/dce_clock_source.h"
 #include "dce/dce_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
 
@@ -306,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
 	OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST(id), \
+	.AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
 	AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
 	return &opp->base;
 }
 
+struct engine *dce110_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+			&aux_engine_regs[inst]);
+
+	return &aux_engine->base.base;
+}
+
 struct clock_source *dce110_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
@@ -651,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
 			pool->base.timing_generators[i] = NULL;
 		}
+
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
+
 	}
 
 	for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1258,6 +1295,14 @@ static bool construct(
 				"DC: failed to create output pixel processor!\n");
 			goto res_create_fail;
 		}
+
+		pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+		if (pool->base.engines[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC:failed to create aux engine!!\n");
+			goto res_create_fail;
+		}
 	}
 
 	dc->fbc_compressor = dce110_compressor_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 30d5b32892d6..caf90ae2cbb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -49,6 +49,7 @@
 #include "dce112/dce112_hw_sequencer.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 
 #include "reg_helper.h"
 
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
 	OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST(id), \
+	.AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
 	AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
 	return &opp->base;
 }
 
+struct engine *dce112_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+			&aux_engine_regs[inst]);
+
+	return &aux_engine->base.base;
+}
+
 struct clock_source *dce112_clock_source_create(
 	struct dc_context *ctx,
 	struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
 		if (pool->base.opps[i] != NULL)
 			dce110_opp_destroy(&pool->base.opps[i]);
 
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
+
 		if (pool->base.transforms[i] != NULL)
 			dce112_transform_destroy(&pool->base.transforms[i]);
 
@@ -640,6 +676,10 @@ static void destruct(struct dce110_resource_pool *pool)
 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
 			pool->base.timing_generators[i] = NULL;
 		}
+
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
+
 	}
 
 	for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1208,6 +1248,13 @@ static bool construct(
 				"DC:failed to create output pixel processor!\n");
 			goto res_create_fail;
 		}
+		pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+		if (pool->base.engines[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC:failed to create aux engine!!\n");
+			goto res_create_fail;
+		}
 	}
 
 	if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8381f27a2361..f7d02f2190d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -53,6 +53,7 @@
 #include "dce/dce_hwseq.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 
 #include "dce/dce_12_0_offset.h"
 #include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
 static const struct dce_opp_mask opp_mask = {
 	OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
 };
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST(id), \
+	.AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
 
 #define audio_regs(id)\
 [id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
 			     ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
 	return &opp->base;
 }
+struct engine *dce120_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+			&aux_engine_regs[inst]);
+
+	return &aux_engine->base.base;
+}
 
 static const struct bios_registers bios_regs = {
 	.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
 	.num_pll = 6,
 };
 
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
 	.disable_clock_gate = true,
 };
 
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
 			pool->base.timing_generators[i] = NULL;
 		}
+
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
+
 	}
 
 	for (i = 0; i < pool->base.audio_count; i++) {
@@ -984,6 +1019,13 @@ static bool construct(
 			dm_error(
 				"DC: failed to create output pixel processor!\n");
 		}
+		pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+		if (pool->base.engines[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC:failed to create aux engine!!\n");
+			goto res_create_fail;
+		}
 
 		/* check next valid pipe */
 		j++;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2ac95ec2bf96..6fb33ad2d3c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -54,6 +54,7 @@
 #include "reg_helper.h"
 
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 #include "dce/dce_abm.h"
 /* TODO remove this include */
 
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
 	OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST(id), \
+	.AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
 	AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
 	return &opp->base;
 }
 
+struct engine *dce80_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+			&aux_engine_regs[inst]);
+
+	return &aux_engine->base.base;
+}
+
 static struct stream_encoder *dce80_stream_encoder_create(
 	enum engine_id eng_id,
 	struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
 			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
 			pool->base.timing_generators[i] = NULL;
 		}
+
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
 	}
 
 	for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -899,6 +935,14 @@ static bool dce80_construct(
 			dm_error("DC: failed to create output pixel processor!\n");
 			goto res_create_fail;
 		}
+
+		pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+		if (pool->base.engines[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC:failed to create aux engine!!\n");
+			goto res_create_fail;
+		}
 	}
 
 	dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 332354ca6529..2138cd3c5d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -294,6 +294,10 @@ void hubp1_program_pixel_format(
 		REG_UPDATE(DCSURF_SURFACE_CONFIG,
 				SURFACE_PIXEL_FORMAT, 66);
 		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+		REG_UPDATE(DCSURF_SURFACE_CONFIG,
+				SURFACE_PIXEL_FORMAT, 12);
+		break;
 	default:
 		BREAK_TO_DEBUGGER();
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 84581b3c392b..cd8c22839227 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -64,6 +64,7 @@
 #include "reg_helper.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 
 const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
 	.rob_buffer_size_kbytes = 64,
@@ -356,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
 	OPP_MASK_SH_LIST_DCN10(_MASK),
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST(id), \
+	.AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
+
 #define tf_regs(id)\
 [id] = {\
 	TF_REG_LIST_DCN10(id),\
@@ -486,7 +502,7 @@ static const struct resource_caps res_cap = {
 	.num_pll = 4,
 };
 
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
 	.sanity_checks = true,
 	.disable_dmcu = true,
 	.force_abm_enable = false,
@@ -514,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
 	.max_downscale_src_width = 3840,
 };
 
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
 	.disable_dmcu = true,
 	.force_abm_enable = false,
 	.timing_trace = true,
@@ -578,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
 	return &opp->base;
 }
 
+struct engine *dcn10_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+			&aux_engine_regs[inst]);
+
+	return &aux_engine->base.base;
+}
+
 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
 {
 	struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -826,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
 			pool->base.timing_generators[i] = NULL;
 		}
+
+		if (pool->base.engines[i] != NULL)
+			pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
 	}
 
 	for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -1255,6 +1291,14 @@ static bool construct(
 			goto fail;
 		}
 
+		pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+		if (pool->base.engines[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC:failed to create aux engine!!\n");
+			goto fail;
+		}
+
 		/* check next valid pipe */
 		j++;
 	}
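
Note the one asymmetry in the teardown: the DCE pools call the dce110_engine_destroy() helper directly, while the DCN10 destruct() above dispatches through the engine's ops table. Presumably both end in the same destroy implementation in the new dce_aux.c; side by side:

	/* DCE80/100/110/112/120 pools */
	if (pool->base.engines[i] != NULL)
		dce110_engine_destroy(&pool->base.engines[i]);

	/* DCN10 pool: virtual dispatch via struct engine_funcs */
	if (pool->base.engines[i] != NULL)
		pool->base.engines[i]->funcs->destroy_engine(
				&pool->base.engines[i]);
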
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
index 1e8a1585e401..b16fb1ff687d 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -96,6 +96,7 @@ struct engine_funcs {
 
 struct engine {
 	const struct engine_funcs *funcs;
+	uint32_t inst;
 	struct ddc *ddc;
 	struct dc_context *ctx;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4446652a9a9e..0fa385872ed3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -138,7 +138,7 @@ struct resource_pool {
 	struct output_pixel_processor *opps[MAX_PIPES];
 	struct timing_generator *timing_generators[MAX_PIPES];
 	struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+	struct engine *engines[MAX_PIPES];
 	struct hubbub *hubbub;
 	struct mpc *mpc;
 	struct pp_smu_funcs_rv *pp_smu;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c650084..697b5ee73845 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,7 +33,7 @@ struct dc_link;
 struct dc_stream_state;
 struct dc_link_settings;
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
 	struct dc_link *link,
 	struct dc_link_settings *known_limit_link_setting);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644
index 000000000000..06d7e5d4cf21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "engine.h"
+#include "include/i2caux_interface.h"
+
+struct aux_engine;
+union aux_config;
+struct aux_engine_funcs {
+	void (*destroy)(
+		struct aux_engine **ptr);
+	bool (*acquire_engine)(
+		struct aux_engine *engine);
+	void (*configure)(
+		struct aux_engine *engine,
+		union aux_config cfg);
+	void (*submit_channel_request)(
+		struct aux_engine *engine,
+		struct aux_request_transaction_data *request);
+	void (*process_channel_reply)(
+		struct aux_engine *engine,
+		struct aux_reply_transaction_data *reply);
+	int (*read_channel_reply)(
+		struct aux_engine *engine,
+		uint32_t size,
+		uint8_t *buffer,
+		uint8_t *reply_result,
+		uint32_t *sw_status);
+	enum aux_channel_operation_result (*get_channel_status)(
+		struct aux_engine *engine,
+		uint8_t *returned_bytes);
+	bool (*is_engine_available)(struct aux_engine *engine);
+};
+struct engine;
+struct aux_engine {
+	struct engine base;
+	const struct aux_engine_funcs *funcs;
+	/* following values are expressed in milliseconds */
+	uint32_t delay;
+	uint32_t max_defer_write_retry;
+
+	bool acquire_reset;
+};
+struct read_command_context {
+	uint8_t *buffer;
+	uint32_t current_read_length;
+	uint32_t offset;
+	enum i2caux_transaction_status status;
+
+	struct aux_request_transaction_data request;
+	struct aux_reply_transaction_data reply;
+
+	uint8_t returned_byte;
+
+	uint32_t timed_out_retry_aux;
+	uint32_t invalid_reply_retry_aux;
+	uint32_t defer_retry_aux;
+	uint32_t defer_retry_i2c;
+	uint32_t invalid_reply_retry_aux_on_ack;
+
+	bool transaction_complete;
+	bool operation_succeeded;
+};
+struct write_command_context {
+	bool mot;
+
+	uint8_t *buffer;
+	uint32_t current_write_length;
+	enum i2caux_transaction_status status;
+
+	struct aux_request_transaction_data request;
+	struct aux_reply_transaction_data reply;
+
+	uint8_t returned_byte;
+
+	uint32_t timed_out_retry_aux;
+	uint32_t invalid_reply_retry_aux;
+	uint32_t defer_retry_aux;
+	uint32_t defer_retry_i2c;
+	uint32_t max_defer_retry;
+	uint32_t ack_m_retry;
+
+	uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+	bool transaction_complete;
+	bool operation_succeeded;
+};
+#endif
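
aux_engine_funcs splits an AUX transaction into submit, poll, and read phases, which is what lets the new dce_aux.c drive the hardware without the old i2caux layer. A plausible calling sequence over these ops, sketched only from the signatures above (the real users live in dce_aux.c and dc_link_ddc.c; the local names are illustrative):

	struct aux_request_transaction_data request;	/* filled by caller */
	uint8_t buffer[DEFAULT_AUX_MAX_DATA_SIZE];
	enum aux_channel_operation_result res;
	uint8_t reply_result;
	uint8_t returned_bytes;
	uint32_t sw_status;

	if (aux->funcs->acquire_engine(aux)) {
		aux->funcs->submit_channel_request(aux, &request);
		res = aux->funcs->get_channel_status(aux, &returned_bytes);
		/* on success, pull the payload plus the AUX reply code */
		aux->funcs->read_channel_reply(aux, returned_bytes, buffer,
					       &reply_result, &sw_status);
	}
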
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
new file mode 100644
index 000000000000..1f5476f41236
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ENGINE_H__
+#define __DAL_ENGINE_H__
+
+#include "dc_ddc_types.h"
+
+enum i2caux_transaction_operation {
+	I2CAUX_TRANSACTION_READ,
+	I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+	enum i2caux_transaction_address_space address_space;
+	uint32_t address;
+	uint32_t length;
+	uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+	I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+	I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+	I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+	I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+	I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+	I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+	I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+	I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+	enum i2caux_transaction_operation operation;
+	struct i2caux_transaction_payload payload;
+	enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+	I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+	I2CAUX_ENGINE_TYPE_AUX,
+	I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+	I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+	I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+struct engine;
+
+struct engine_funcs {
+	enum i2caux_engine_type (*get_engine_type)(
+		const struct engine *engine);
+	struct aux_engine* (*acquire)(
+		struct engine *engine,
+		struct ddc *ddc);
+	bool (*submit_request)(
+		struct engine *engine,
+		struct i2caux_transaction_request *request,
+		bool middle_of_transaction);
+	void (*release_engine)(
+		struct engine *engine);
+	void (*destroy_engine)(
+		struct engine **engine);
+};
+
+struct engine {
+	const struct engine_funcs *funcs;
+	uint32_t inst;
+	struct ddc *ddc;
+	struct dc_context *ctx;
+};
+
+#endif
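
struct engine is the thin generic layer: instance, ddc binding, context, plus an ops table whose acquire() hands back the AUX specialization. Since struct aux_engine embeds it as .base, and the create functions in the resource files return &aux_engine->base.base, going from a generic engine back to its AUX wrapper is a container_of() walk; a minimal sketch (the helper name is illustrative, not part of this patch):

	#include <linux/kernel.h>

	static inline struct aux_engine *engine_to_aux(struct engine *e)
	{
		/* valid only for engines created by the *_aux_engine_create()
		 * helpers, which all wrap a struct aux_engine_dce110 */
		return container_of(e, struct aux_engine, base);
	}
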
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 75c208283e5f..7a646f94b478 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
 static int pp_get_current_clocks(void *handle,
 			struct amd_pp_clock_info *clocks)
 {
-	struct amd_pp_simple_clock_info simple_clocks;
+	struct amd_pp_simple_clock_info simple_clocks = { 0 };
 	struct pp_clock_info hw_clocks;
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
@@ -1034,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 
-	clocks->max_clocks_state = simple_clocks.level;
+	if (simple_clocks.level == 0)
+		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+	else
+		clocks->max_clocks_state = simple_clocks.level;
 
 	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1137,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
 		return -EINVAL;
 
+	clocks->level = PP_DAL_POWERLEVEL_7;
+
 	mutex_lock(&hwmgr->smu_lock);
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
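
Both powerplay hunks guard DAL against an unset clock level: simple_clocks is now zero-initialized so a backend that never fills it in cannot leak stack garbage, and a zero level is mapped to PP_DAL_POWERLEVEL_7, taken here to be the highest defined DAL power level. Condensed (the helper is illustrative; only the enum value comes from the patch):

	static unsigned int pick_max_clocks_state(unsigned int reported)
	{
		/* 0 means "not reported", not "power level 0" */
		return reported ? reported : PP_DAL_POWERLEVEL_7;
	}
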
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 1a0dccb3fac1..fb86c24394ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -289,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
 	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
 	uint32_t i;
+	int result;
+
+	result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+	if (!result) {
+		data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+		data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+	}
 
 	od_lookup_table = &odn_table->vddc_lookup_table;
 	vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2072,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 	if (data->smu_features[GNLD_AVFS].supported) {
 		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
 		if (!result) {
-			data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
-			data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
 			pp_table->MinVoltageVid = (uint8_t)
 					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
 			pp_table->MaxVoltageVid = (uint8_t)
@@ -3254,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
 {
 	int result = 0;
 	struct vega10_hwmgr *data = hwmgr->backend;
+	struct vega10_dpm_table *dpm_table = &data->dpm_table;
+	struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+	struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+	int count;
 
 	if (!data->need_update_dpm_table)
 		return 0;
 
+	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		for (count = 0; count < dpm_table->gfx_table.count; count++)
+			dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+	}
+
+	odn_clk_table = &odn_table->vdd_dep_on_mclk;
+	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+		for (count = 0; count < dpm_table->mem_table.count; count++)
+			dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+	}
+
 	if (data->need_update_dpm_table &
 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
 		result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3705,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
 {
 	smum_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetUclkFastSwitch,
-			has_disp ? 0 : 1);
+			has_disp ? 1 : 0);
 }
 
 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3780,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
 	uint32_t i;
 	struct pp_display_clock_request clock_req;
 
-	if (hwmgr->display_config->num_display > 1)
+	if ((hwmgr->display_config->num_display > 1) &&
+	    !hwmgr->display_config->multi_monitor_in_sync &&
+	    !hwmgr->display_config->nb_pstate_switch_disable)
 		vega10_notify_smc_display_change(hwmgr, false);
 	else
 		vega10_notify_smc_display_change(hwmgr, true);
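
Two independent fixes sit in the vega10 hunks: the ODN min/max VDDC limits now come from the ATOM firmware tables at initial-setting time, so they exist before the first overdrive edit rather than only after AVFS population, and the UclkFastSwitch message polarity is inverted. After the fix the argument tracks the display state directly, assuming the SMU reads 1 as "fast uclk switching allowed":

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			has_disp ? 1 : 0);

vega12 receives the same polarity and nb_pstate_switch_disable changes below.
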
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 4ed218dd8ba7..0789d64246ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1334,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
 	if (data->smu_features[GNLD_DPM_UCLK].enabled)
 		return smum_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetUclkFastSwitch,
-			has_disp ? 0 : 1);
+			has_disp ? 1 : 0);
 
 	return 0;
 }
@@ -1389,7 +1389,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 	struct pp_display_clock_request clock_req;
 
 	if ((hwmgr->display_config->num_display > 1) &&
-	    !hwmgr->display_config->multi_monitor_in_sync)
+	    !hwmgr->display_config->multi_monitor_in_sync &&
+	    !hwmgr->display_config->nb_pstate_switch_disable)
 		vega12_notify_smc_display_change(hwmgr, false);
 	else
 		vega12_notify_smc_display_change(hwmgr, true);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 36414ba56b22..207532c05eb8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 			gpu->lastctx = NULL;
 		mutex_unlock(&gpu->lock);
 
-		drm_sched_entity_destroy(&gpu->sched,
-			&ctx->sched_entity[i]);
+		drm_sched_entity_destroy(&ctx->sched_entity[i]);
 	}
 }
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..590e44b0d963 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dac71e3b4514..3f2fc5e8242a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq_list[0];
-	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
 
@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
 static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 					    struct drm_sched_entity *entity)
 {
-	return entity->sched == sched &&
-		entity->rq != NULL;
+	return entity->rq != NULL &&
+		entity->rq->sched == sched;
 }
 
 /**
@@ -273,11 +272,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->rq->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -312,10 +312,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->rq->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +374,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
@@ -387,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
 		container_of(cb, struct drm_sched_entity, cb);
 	entity->dependency = NULL;
 	dma_fence_put(f);
-	drm_sched_wakeup(entity->sched);
+	drm_sched_wakeup(entity->rq->sched);
 }
 
 static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -437,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_fence *s_fence;
 
 	if (!fence || dma_fence_is_signaled(fence))
@@ -454,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct dma_fence * fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
@@ -499,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 static struct drm_sched_job *
 drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_job *sched_job = to_drm_sched_job(
 						spsc_queue_peek(&entity->job_queue));
 
@@ -740,10 +740,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
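
The scheduler-side theme of this merge: a drm_sched_entity now identifies its scheduler through entity->rq->sched, so the redundant entity->sched field and the explicit sched parameters are gone. The driver-facing shape after the change, condensed from the etnaviv and v3d hunks that follow:

	/* job submission: the entity alone implies the scheduler */
	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->cmdbuf.ctx);
	if (ret)
		return ret;

	/* teardown: likewise no more struct drm_gpu_scheduler argument */
	drm_sched_entity_destroy(&ctx->sched_entity[i]);
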
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 45d9c3affbea..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = entity->sched;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1dceba2b42fd..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-					 &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index e1fcbb4cd0ae..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2205e89722f6..091b9afcd184 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,7 +52,6 @@ enum drm_sched_priority {
  *        runqueue.
  * @rq: runqueue to which this entity belongs.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @sched: the scheduler instance to which this entity is enqueued.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
  *             new &drm_sched_fence which is part of the entity.
@@ -76,7 +75,6 @@ struct drm_sched_entity {
 	struct list_head list;
 	struct drm_sched_rq *rq;
 	spinlock_t rq_lock;
-	struct drm_gpu_scheduler *sched;
 
 	struct spsc_queue job_queue;
 
@@ -286,12 +284,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  struct drm_sched_rq **rq_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +297,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,