43 files changed, 372 insertions, 255 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8f6f45567bfa..305143fcc1ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -342,15 +342,12 @@ void get_local_mem_info(struct kgd_dev *kgd,
         mem_info->local_mem_size_public,
         mem_info->local_mem_size_private);

-    if (amdgpu_emu_mode == 1) {
-        mem_info->mem_clk_max = 100;
-        return;
-    }
-
     if (amdgpu_sriov_vf(adev))
         mem_info->mem_clk_max = adev->clock.default_mclk / 100;
-    else
+    else if (adev->powerplay.pp_funcs)
         mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
+    else
+        mem_info->mem_clk_max = 100;
 }

 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
@@ -367,13 +364,12 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
     struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

     /* the sclk is in quantas of 10kHz */
-    if (amdgpu_emu_mode == 1)
-        return 100;
-
     if (amdgpu_sriov_vf(adev))
         return adev->clock.default_sclk / 100;
-
+    else if (adev->powerplay.pp_funcs)
         return amdgpu_dpm_get_sclk(adev, false) / 100;
+    else
+        return 100;
 }

 void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 1bcb2b247335..daa06e7c5bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -569,7 +569,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
     { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
     { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
     { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
-    { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
     { 0, 0, 0, 0, 0 },
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9c1d491d742e..82312a7bc6ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
     struct amdgpu_bo_list_entry *e;
     struct list_head duplicates;
     unsigned i, tries = 10;
+    struct amdgpu_bo *gds;
+    struct amdgpu_bo *gws;
+    struct amdgpu_bo *oa;
     int r;

     INIT_LIST_HEAD(&p->validated);
@@ -652,31 +655,36 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,

     amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                  p->bytes_moved_vis);
+
     if (p->bo_list) {
-        struct amdgpu_bo *gds = p->bo_list->gds_obj;
-        struct amdgpu_bo *gws = p->bo_list->gws_obj;
-        struct amdgpu_bo *oa = p->bo_list->oa_obj;
         struct amdgpu_vm *vm = &fpriv->vm;
         unsigned i;

+        gds = p->bo_list->gds_obj;
+        gws = p->bo_list->gws_obj;
+        oa = p->bo_list->oa_obj;
         for (i = 0; i < p->bo_list->num_entries; i++) {
             struct amdgpu_bo *bo = p->bo_list->array[i].robj;

             p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
         }
+    } else {
+        gds = p->adev->gds.gds_gfx_bo;
+        gws = p->adev->gds.gws_gfx_bo;
+        oa = p->adev->gds.oa_gfx_bo;
+    }

     if (gds) {
         p->job->gds_base = amdgpu_bo_gpu_offset(gds);
         p->job->gds_size = amdgpu_bo_size(gds);
     }
     if (gws) {
         p->job->gws_base = amdgpu_bo_gpu_offset(gws);
         p->job->gws_size = amdgpu_bo_size(gws);
     }
     if (oa) {
         p->job->oa_base = amdgpu_bo_gpu_offset(oa);
         p->job->oa_size = amdgpu_bo_size(oa);
-    }
     }

     if (!r && p->uf_entry.robj) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 290e279abf0d..3317d1536f4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1730,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
             }
         }
     }
+
+    if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
+        /* enable gfx powergating */
+        amdgpu_device_ip_set_powergating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_PG_STATE_GATE);
+        /* enable gfxoff */
+        amdgpu_device_ip_set_powergating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_SMC,
+                                               AMD_PG_STATE_GATE);
+    }
+
     return 0;
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 2c8e27370284..5fb156a01774 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
+#include "amdgpu_display.h"

 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -235,6 +236,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
     /* create a gem object to contain this object in */
     if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
         AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+            /* if gds bo is created from user space, it must be
+             * passed to bo list
+             */
+            DRM_ERROR("GDS bo cannot be per-vm-bo\n");
+            return -EINVAL;
+        }
         flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
         if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
             size = size << AMDGPU_GDS_SHIFT;
@@ -749,15 +757,16 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
     struct amdgpu_device *adev = dev->dev_private;
     struct drm_gem_object *gobj;
     uint32_t handle;
+    u32 domain;
     int r;

     args->pitch = amdgpu_align_pitch(adev, args->width,
                                      DIV_ROUND_UP(args->bpp, 8), 0);
     args->size = (u64)args->pitch * args->height;
     args->size = ALIGN(args->size, PAGE_SIZE);
-
-    r = amdgpu_gem_object_create(adev, args->size, 0,
-                                 AMDGPU_GEM_DOMAIN_VRAM,
+    domain = amdgpu_bo_get_preferred_pin_domain(adev,
+                             amdgpu_display_supported_domains(adev));
+    r = amdgpu_gem_object_create(adev, args->size, 0, domain,
                                  AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                  false, NULL, &gobj);
     if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6a9e46ae7f0a..5e4e1bd90383 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -703,11 +703,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
     /* This assumes only APU display buffers are pinned with (VRAM|GTT).
      * See function amdgpu_display_supported_domains()
      */
-    if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
-        domain = AMDGPU_GEM_DOMAIN_VRAM;
-        if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
-            domain = AMDGPU_GEM_DOMAIN_GTT;
-    }
+    domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

     if (bo->pin_count) {
         uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -1066,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)

     return bo->tbo.offset;
 }
+
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+                                            uint32_t domain)
+{
+    if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+        domain = AMDGPU_GEM_DOMAIN_VRAM;
+        if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+            domain = AMDGPU_GEM_DOMAIN_GTT;
+    }
+    return domain;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 540e03fa159f..731748033878 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -289,7 +289,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                   struct reservation_object *resv,
                                   struct dma_fence **fence,
                                   bool direct);
-
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+                                            uint32_t domain);

 /*
  * sub allocation
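For orientation (an illustrative sketch, not part of the patch): the helper exported by the two hunks above, amdgpu_bo_get_preferred_pin_domain(), centralizes the rule that a pin request for VRAM|GTT prefers VRAM but falls back to GTT when the real VRAM size is at or below the scatter/gather threshold, as on small-carveout APUs. A standalone approximation of that rule, using made-up DOMAIN_* and SG_THRESHOLD stand-ins rather than the real driver constants:

    /* Sketch only; DOMAIN_* and SG_THRESHOLD stand in for the real
     * AMDGPU_GEM_DOMAIN_* flags and AMDGPU_SG_THRESHOLD. */
    #include <stdint.h>
    #include <stdio.h>

    #define DOMAIN_GTT   0x2u
    #define DOMAIN_VRAM  0x4u
    #define SG_THRESHOLD (256ull << 20)   /* placeholder threshold */

    static uint32_t preferred_pin_domain(uint64_t real_vram_size, uint32_t domain)
    {
        if (domain == (DOMAIN_VRAM | DOMAIN_GTT)) {
            domain = DOMAIN_VRAM;
            if (real_vram_size <= SG_THRESHOLD)
                domain = DOMAIN_GTT;
        }
        return domain;
    }

    int main(void)
    {
        /* A small-VRAM part asking to pin as VRAM|GTT ends up in GTT ... */
        printf("0x%x\n", preferred_pin_domain(64ull << 20, DOMAIN_VRAM | DOMAIN_GTT));
        /* ... while a part with plenty of VRAM keeps VRAM. */
        printf("0x%x\n", preferred_pin_domain(8ull << 30, DOMAIN_VRAM | DOMAIN_GTT));
        return 0;
    }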
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 8851bcdfc260..127e87b470ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *ring;
-    struct drm_sched_rq *rq;
     unsigned long bo_size;
     const char *fw_name;
     const struct common_firmware_header *hdr;
@@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
     }

     hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+    adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
     family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
     version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
     version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
@@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
         return r;
     }

-    ring = &adev->vcn.ring_dec;
-    rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-    r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-                              rq, NULL);
-    if (r != 0) {
-        DRM_ERROR("Failed setting up VCN dec run queue.\n");
-        return r;
-    }
-
-    ring = &adev->vcn.ring_enc[0];
-    rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-    r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-                              rq, NULL);
-    if (r != 0) {
-        DRM_ERROR("Failed setting up VCN enc run queue.\n");
-        return r;
-    }
-
     return 0;
 }

@@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)

     kfree(adev->vcn.saved_bo);

-    drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
-
-    drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
-
     amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                           &adev->vcn.gpu_addr,
                           (void **)&adev->vcn.cpu_addr);
@@ -278,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 }

 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-                                   struct amdgpu_bo *bo, bool direct,
+                                   struct amdgpu_bo *bo,
                                    struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
@@ -306,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
     }
     ib->length_dw = 16;

-    if (direct) {
-        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-        job->fence = dma_fence_get(f);
-        if (r)
-            goto err_free;
+    r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+    job->fence = dma_fence_get(f);
+    if (r)
+        goto err_free;

     amdgpu_job_free(job);
-    } else {
-        r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-        if (r)
-            goto err_free;
-    }

     amdgpu_bo_fence(bo, f, false);
     amdgpu_bo_unreserve(bo);
@@ -370,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
     for (i = 14; i < 1024; ++i)
         msg[i] = cpu_to_le32(0x0);

-    return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+    return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }

 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                          bool direct, struct dma_fence **fence)
+                                          struct dma_fence **fence)
 {
     struct amdgpu_device *adev = ring->adev;
     struct amdgpu_bo *bo = NULL;
@@ -396,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
     for (i = 6; i < 1024; ++i)
         msg[i] = cpu_to_le32(0x0);

-    return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+    return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }

 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -410,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         goto error;
     }

-    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
+    r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
     if (r) {
         DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
         goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 181e6afa9847..773010b9ff15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -67,8 +67,6 @@ struct amdgpu_vcn {
     struct amdgpu_ring ring_dec;
     struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
     struct amdgpu_irq_src irq;
-    struct drm_sched_entity entity_dec;
-    struct drm_sched_entity entity_enc;
     unsigned num_enc_rings;
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ccba88cc8c54..b0eb2f537392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2123,7 +2123,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
         before->last = saddr - 1;
         before->offset = tmp->offset;
         before->flags = tmp->flags;
-        list_add(&before->list, &tmp->list);
+        before->bo_va = tmp->bo_va;
+        list_add(&before->list, &tmp->bo_va->invalids);
     }

     /* Remember mapping split at the end */
@@ -2133,7 +2134,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
         after->offset = tmp->offset;
         after->offset += after->start - tmp->start;
         after->flags = tmp->flags;
-        list_add(&after->list, &tmp->list);
+        after->bo_va = tmp->bo_va;
+        list_add(&after->list, &tmp->bo_va->invalids);
     }

     list_del(&tmp->list);
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 60608b3df881..d5ebe566809b 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
     int fb_channel_number;

     fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
-    if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
+    if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
         fb_channel_number = 0;

     return df_v3_6_channel_number[fb_channel_number];
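As a side note on the bounds check fixed above (an illustration, not from the patch): for a table of N entries the valid indices are 0..N-1, so an index equal to ARRAY_SIZE() must already be rejected; checking only '>' still lets index == N read one element past the end. A minimal standalone version of the corrected check, with placeholder table values:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Placeholder contents, not the real df_v3_6_channel_number[] table. */
    static const unsigned int channel_number[] = { 1, 2, 0, 4, 0, 8 };

    static unsigned int lookup(int idx)
    {
        /* Using '>' here would accept idx == 6 and read past the end. */
        if (idx < 0 || idx >= (int)ARRAY_SIZE(channel_number))
            idx = 0;
        return channel_number[idx];
    }

    int main(void)
    {
        printf("%u %u\n", lookup(3), lookup(6));   /* prints "4 1" */
        return 0;
    }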
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d7530fdfaad5..a69153435ea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -111,6 +111,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =

 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
 {
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
@@ -1837,13 +1838,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
                                         int indirect_offset,
                                         int list_size,
                                         int *unique_indirect_regs,
-                                        int *unique_indirect_reg_count,
+                                        int unique_indirect_reg_count,
                                         int *indirect_start_offsets,
-                                        int *indirect_start_offsets_count)
+                                        int *indirect_start_offsets_count,
+                                        int max_start_offsets_count)
 {
     int idx;

     for (; indirect_offset < list_size; indirect_offset++) {
+        WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
         indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
         *indirect_start_offsets_count = *indirect_start_offsets_count + 1;

@@ -1851,14 +1854,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
             indirect_offset += 2;

         /* look for the matching indice */
-        for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+        for (idx = 0; idx < unique_indirect_reg_count; idx++) {
             if (unique_indirect_regs[idx] ==
                 register_list_format[indirect_offset] ||
                 !unique_indirect_regs[idx])
                 break;
         }

-        BUG_ON(idx >= *unique_indirect_reg_count);
+        BUG_ON(idx >= unique_indirect_reg_count);

         if (!unique_indirect_regs[idx])
             unique_indirect_regs[idx] = register_list_format[indirect_offset];
@@ -1893,9 +1896,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
                 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
                 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
                 unique_indirect_regs,
-                &unique_indirect_reg_count,
+                unique_indirect_reg_count,
                 indirect_start_offsets,
-                &indirect_start_offsets_count);
+                &indirect_start_offsets_count,
+                ARRAY_SIZE(indirect_start_offsets));

     /* enable auto inc in case it is disabled */
     tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -3404,11 +3408,6 @@ static int gfx_v9_0_late_init(void *handle)
     if (r)
         return r;

-    r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
-                                               AMD_PG_STATE_GATE);
-    if (r)
-        return r;
-
     return 0;
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 0c768e388ace..727071fee6f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");

 #define smnMP1_FIRMWARE_FLAGS 0x3010028

+static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
+
 static int
 psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
 {
@@ -210,12 +212,31 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
     return ret;
 }

+static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
+{
+    int i;
+
+    if (ver == adev->psp.sos_fw_version)
+        return true;
+
+    /*
+     * Double check if the latest four legacy versions.
+     * If yes, it is still the right version.
+     */
+    for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
+        if (sos_old_versions[i] == adev->psp.sos_fw_version)
+            return true;
+    }
+
+    return false;
+}
+
 static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 {
     int ret;
     unsigned int psp_gfxdrv_command_reg = 0;
     struct amdgpu_device *adev = psp->adev;
-    uint32_t sol_reg;
+    uint32_t sol_reg, ver;

     /* Check sOS sign of life register to confirm sys driver and sOS
      * are already been loaded.
@@ -248,6 +269,10 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
                    RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
                    0, true);

+    ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+    if (!psp_v3_1_match_version(adev, ver))
+        DRM_WARN("SOS version doesn't match\n");
+
     return ret;
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 68b4a22a8892..83f2717fcf81 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -685,6 +685,7 @@ static int soc15_common_early_init(void *handle)
             AMD_CG_SUPPORT_BIF_MGCG |
             AMD_CG_SUPPORT_BIF_LS |
             AMD_CG_SUPPORT_HDP_MGCG |
+            AMD_CG_SUPPORT_HDP_LS |
             AMD_CG_SUPPORT_ROM_MGCG |
             AMD_CG_SUPPORT_VCE_MGCG |
             AMD_CG_SUPPORT_UVD_MGCG;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 110b294ebed3..29684c3ea4ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -769,14 +769,14 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
     return 0;
 }

-bool vcn_v1_0_is_idle(void *handle)
+static bool vcn_v1_0_is_idle(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
 }

-int vcn_v1_0_wait_for_idle(void *handle)
+static int vcn_v1_0_wait_for_idle(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     int ret = 0;
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/moduleparam.h> | 46 | #include <linux/moduleparam.h> |
47 | #include <linux/version.h> | 47 | #include <linux/version.h> |
48 | #include <linux/types.h> | 48 | #include <linux/types.h> |
49 | #include <linux/pm_runtime.h> | ||
49 | 50 | ||
50 | #include <drm/drmP.h> | 51 | #include <drm/drmP.h> |
51 | #include <drm/drm_atomic.h> | 52 | #include <drm/drm_atomic.h> |
@@ -2095,12 +2096,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) | |||
2095 | { | 2096 | { |
2096 | uint32_t bpc = connector->display_info.bpc; | 2097 | uint32_t bpc = connector->display_info.bpc; |
2097 | 2098 | ||
2098 | /* Limited color depth to 8bit | ||
2099 | * TODO: Still need to handle deep color | ||
2100 | */ | ||
2101 | if (bpc > 8) | ||
2102 | bpc = 8; | ||
2103 | |||
2104 | switch (bpc) { | 2099 | switch (bpc) { |
2105 | case 0: | 2100 | case 0: |
2106 | /* Temporary Work around, DRM don't parse color depth for | 2101 | /* Temporary Work around, DRM don't parse color depth for |
@@ -2316,27 +2311,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, | |||
2316 | } | 2311 | } |
2317 | } | 2312 | } |
2318 | 2313 | ||
2319 | static int create_fake_sink(struct amdgpu_dm_connector *aconnector) | 2314 | static struct dc_sink * |
2315 | create_fake_sink(struct amdgpu_dm_connector *aconnector) | ||
2320 | { | 2316 | { |
2321 | struct dc_sink *sink = NULL; | ||
2322 | struct dc_sink_init_data sink_init_data = { 0 }; | 2317 | struct dc_sink_init_data sink_init_data = { 0 }; |
2323 | 2318 | struct dc_sink *sink = NULL; | |
2324 | sink_init_data.link = aconnector->dc_link; | 2319 | sink_init_data.link = aconnector->dc_link; |
2325 | sink_init_data.sink_signal = aconnector->dc_link->connector_signal; | 2320 | sink_init_data.sink_signal = aconnector->dc_link->connector_signal; |
2326 | 2321 | ||
2327 | sink = dc_sink_create(&sink_init_data); | 2322 | sink = dc_sink_create(&sink_init_data); |
2328 | if (!sink) { | 2323 | if (!sink) { |
2329 | DRM_ERROR("Failed to create sink!\n"); | 2324 | DRM_ERROR("Failed to create sink!\n"); |
2330 | return -ENOMEM; | 2325 | return NULL; |
2331 | } | 2326 | } |
2332 | |||
2333 | sink->sink_signal = SIGNAL_TYPE_VIRTUAL; | 2327 | sink->sink_signal = SIGNAL_TYPE_VIRTUAL; |
2334 | aconnector->fake_enable = true; | ||
2335 | |||
2336 | aconnector->dc_sink = sink; | ||
2337 | aconnector->dc_link->local_sink = sink; | ||
2338 | 2328 | ||
2339 | return 0; | 2329 | return sink; |
2340 | } | 2330 | } |
2341 | 2331 | ||
2342 | static void set_multisync_trigger_params( | 2332 | static void set_multisync_trigger_params( |
@@ -2399,7 +2389,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2399 | struct dc_stream_state *stream = NULL; | 2389 | struct dc_stream_state *stream = NULL; |
2400 | struct drm_display_mode mode = *drm_mode; | 2390 | struct drm_display_mode mode = *drm_mode; |
2401 | bool native_mode_found = false; | 2391 | bool native_mode_found = false; |
2402 | 2392 | struct dc_sink *sink = NULL; | |
2403 | if (aconnector == NULL) { | 2393 | if (aconnector == NULL) { |
2404 | DRM_ERROR("aconnector is NULL!\n"); | 2394 | DRM_ERROR("aconnector is NULL!\n"); |
2405 | return stream; | 2395 | return stream; |
@@ -2417,15 +2407,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2417 | return stream; | 2407 | return stream; |
2418 | } | 2408 | } |
2419 | 2409 | ||
2420 | if (create_fake_sink(aconnector)) | 2410 | sink = create_fake_sink(aconnector); |
2411 | if (!sink) | ||
2421 | return stream; | 2412 | return stream; |
2413 | } else { | ||
2414 | sink = aconnector->dc_sink; | ||
2422 | } | 2415 | } |
2423 | 2416 | ||
2424 | stream = dc_create_stream_for_sink(aconnector->dc_sink); | 2417 | stream = dc_create_stream_for_sink(sink); |
2425 | 2418 | ||
2426 | if (stream == NULL) { | 2419 | if (stream == NULL) { |
2427 | DRM_ERROR("Failed to create stream for sink!\n"); | 2420 | DRM_ERROR("Failed to create stream for sink!\n"); |
2428 | return stream; | 2421 | goto finish; |
2429 | } | 2422 | } |
2430 | 2423 | ||
2431 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { | 2424 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { |
@@ -2464,12 +2457,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2464 | fill_audio_info( | 2457 | fill_audio_info( |
2465 | &stream->audio_info, | 2458 | &stream->audio_info, |
2466 | drm_connector, | 2459 | drm_connector, |
2467 | aconnector->dc_sink); | 2460 | sink); |
2468 | 2461 | ||
2469 | update_stream_signal(stream); | 2462 | update_stream_signal(stream); |
2470 | 2463 | ||
2471 | if (dm_state && dm_state->freesync_capable) | 2464 | if (dm_state && dm_state->freesync_capable) |
2472 | stream->ignore_msa_timing_param = true; | 2465 | stream->ignore_msa_timing_param = true; |
2466 | finish: | ||
2467 | if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) | ||
2468 | dc_sink_release(sink); | ||
2473 | 2469 | ||
2474 | return stream; | 2470 | return stream; |
2475 | } | 2471 | } |
@@ -2714,6 +2710,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) | |||
2714 | struct dm_connector_state *state = | 2710 | struct dm_connector_state *state = |
2715 | to_dm_connector_state(connector->state); | 2711 | to_dm_connector_state(connector->state); |
2716 | 2712 | ||
2713 | if (connector->state) | ||
2714 | __drm_atomic_helper_connector_destroy_state(connector->state); | ||
2715 | |||
2717 | kfree(state); | 2716 | kfree(state); |
2718 | 2717 | ||
2719 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 2718 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
@@ -2724,8 +2723,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) | |||
2724 | state->underscan_hborder = 0; | 2723 | state->underscan_hborder = 0; |
2725 | state->underscan_vborder = 0; | 2724 | state->underscan_vborder = 0; |
2726 | 2725 | ||
2727 | connector->state = &state->base; | 2726 | __drm_atomic_helper_connector_reset(connector, &state->base); |
2728 | connector->state->connector = connector; | ||
2729 | } | 2727 | } |
2730 | } | 2728 | } |
2731 | 2729 | ||
@@ -3083,17 +3081,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, | |||
3083 | } | 3081 | } |
3084 | } | 3082 | } |
3085 | 3083 | ||
3086 | /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer | ||
3087 | * prepare and cleanup in drm_atomic_helper_prepare_planes | ||
3088 | * and drm_atomic_helper_cleanup_planes because fb doens't in s3. | ||
3089 | * IN 4.10 kernel this code should be removed and amdgpu_device_suspend | ||
3090 | * code touching fram buffers should be avoided for DC. | ||
3091 | */ | ||
3092 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { | ||
3093 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc); | ||
3094 | |||
3095 | acrtc->cursor_bo = obj; | ||
3096 | } | ||
3097 | return 0; | 3084 | return 0; |
3098 | } | 3085 | } |
3099 | 3086 | ||
@@ -4281,6 +4268,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
4281 | if (dm_old_crtc_state->stream) | 4268 | if (dm_old_crtc_state->stream) |
4282 | remove_stream(adev, acrtc, dm_old_crtc_state->stream); | 4269 | remove_stream(adev, acrtc, dm_old_crtc_state->stream); |
4283 | 4270 | ||
4271 | pm_runtime_get_noresume(dev->dev); | ||
4272 | |||
4284 | acrtc->enabled = true; | 4273 | acrtc->enabled = true; |
4285 | acrtc->hw_mode = new_crtc_state->mode; | 4274 | acrtc->hw_mode = new_crtc_state->mode; |
4286 | crtc->hwmode = new_crtc_state->mode; | 4275 | crtc->hwmode = new_crtc_state->mode; |
@@ -4469,6 +4458,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
4469 | drm_atomic_helper_wait_for_flip_done(dev, state); | 4458 | drm_atomic_helper_wait_for_flip_done(dev, state); |
4470 | 4459 | ||
4471 | drm_atomic_helper_cleanup_planes(dev, state); | 4460 | drm_atomic_helper_cleanup_planes(dev, state); |
4461 | |||
4462 | /* Finally, drop a runtime PM reference for each newly disabled CRTC, | ||
4463 | * so we can put the GPU into runtime suspend if we're not driving any | ||
4464 | * displays anymore | ||
4465 | */ | ||
4466 | pm_runtime_mark_last_busy(dev->dev); | ||
4467 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | ||
4468 | if (old_crtc_state->active && !new_crtc_state->active) | ||
4469 | pm_runtime_put_autosuspend(dev->dev); | ||
4470 | } | ||
4472 | } | 4471 | } |
4473 | 4472 | ||
4474 | 4473 | ||
@@ -4768,15 +4767,16 @@ next_crtc: | |||
4768 | * We want to do dc stream updates that do not require a | 4767 | * We want to do dc stream updates that do not require a |
4769 | * full modeset below. | 4768 | * full modeset below. |
4770 | */ | 4769 | */ |
4771 | if (!enable || !aconnector || modereset_required(new_crtc_state)) | 4770 | if (!(enable && aconnector && new_crtc_state->enable && |
4771 | new_crtc_state->active)) | ||
4772 | continue; | 4772 | continue; |
4773 | /* | 4773 | /* |
4774 | * Given above conditions, the dc state cannot be NULL because: | 4774 | * Given above conditions, the dc state cannot be NULL because: |
4775 | * 1. We're attempting to enable a CRTC. Which has a... | 4775 | * 1. We're in the process of enabling CRTCs (just been added |
4776 | * 2. Valid connector attached, and | 4776 | * to the dc context, or already is on the context) |
4777 | * 3. User does not want to reset it (disable or mark inactive, | 4777 | * 2. Has a valid connector attached, and |
4778 | * which can happen on a CRTC that's already disabled). | 4778 | * 3. Is currently active and enabled. |
4779 | * => It currently exists. | 4779 | * => The dc stream state currently exists. |
4780 | */ | 4780 | */ |
4781 | BUG_ON(dm_new_crtc_state->stream == NULL); | 4781 | BUG_ON(dm_new_crtc_state->stream == NULL); |
4782 | 4782 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 4be21bf54749..a910f01838ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
         return 0;
     }

+    if (acrtc->otg_inst == -1)
+        return 0;
+
     irq_source = dal_irq_type + acrtc->otg_inst;

     st = (state == AMDGPU_IRQ_STATE_ENABLE);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0229c7edb8ad..5a3346124a01 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -234,6 +234,33 @@ static void pp_to_dc_clock_levels(
     }
 }

+static void pp_to_dc_clock_levels_with_latency(
+        const struct pp_clock_levels_with_latency *pp_clks,
+        struct dm_pp_clock_levels_with_latency *clk_level_info,
+        enum dm_pp_clock_type dc_clk_type)
+{
+    uint32_t i;
+
+    if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+        DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+                 pp_clks->num_levels,
+                 DM_PP_MAX_CLOCK_LEVELS);
+
+        clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+    } else
+        clk_level_info->num_levels = pp_clks->num_levels;
+
+    DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+              DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+    for (i = 0; i < clk_level_info->num_levels; i++) {
+        DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+        clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+        clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+    }
+}
+
 bool dm_pp_get_clock_levels_by_type(
     const struct dc_context *ctx,
     enum dm_pp_clock_type clk_type,
@@ -311,8 +338,22 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
     enum dm_pp_clock_type clk_type,
     struct dm_pp_clock_levels_with_latency *clk_level_info)
 {
-    /* TODO: to be implemented */
-    return false;
+    struct amdgpu_device *adev = ctx->driver_context;
+    void *pp_handle = adev->powerplay.pp_handle;
+    struct pp_clock_levels_with_latency pp_clks = { 0 };
+    const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+    if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+        return false;
+
+    if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+                                                 dc_to_pp_clock_type(clk_type),
+                                                 &pp_clks))
+        return false;
+
+    pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+    return true;
 }

 bool dm_pp_get_clock_levels_by_type_with_voltage(
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index e61dd97d0928..f28989860fd8 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
     return min_clamp;
 }

+unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
+{
+    return ux_dy(arg.value, 3, 19);
+}
+
 unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
 {
     return ux_dy(arg.value, 2, 19);
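For context on the new dc_fixpt_u3d19() conversion added above (a sketch under stated assumptions, not the driver's ux_dy() implementation, which also rounds and clamps): struct fixed31_32 stores values in Q31.32, so an unsigned 3.19 result amounts to dropping 13 of the 32 fraction bits and keeping 3 integer plus 19 fraction bits; 1.0 maps to 1 << 19, the same encoding the IDENTITY_RATIO() macro in dcn10_dpp.c checks via dc_fixpt_u2d19(). A truncating standalone approximation:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: Q31.32 -> U3.19 by truncation; the real helpers also round/clamp. */
    static uint32_t q31_32_to_u3d19(int64_t value)
    {
        /* Drop 32 - 19 = 13 fraction bits, keep 3 + 19 = 22 bits. */
        return (uint32_t)((uint64_t)value >> 13) & ((1u << 22) - 1);
    }

    int main(void)
    {
        int64_t one          = 1LL << 32;   /* 1.0 in Q31.32 */
        int64_t two_and_half = 5LL << 31;   /* 2.5 in Q31.32 */

        /* Prints "524288 1310720", i.e. 1.0 * 2^19 and 2.5 * 2^19. */
        printf("%u %u\n", q31_32_to_u3d19(one), q31_32_to_u3d19(two_and_half));
        return 0;
    }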
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7d609c71394b..7857cb42b3e6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1630,17 +1630,42 @@ static enum dc_status read_hpd_rx_irq_data(
     struct dc_link *link,
     union hpd_irq_data *irq_data)
 {
+    static enum dc_status retval;
+
     /* The HW reads 16 bytes from 200h on HPD,
      * but if we get an AUX_DEFER, the HW cannot retry
      * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
      * fail, so we now explicitly read 6 bytes which is
      * the req from the above mentioned test cases.
+     *
+     * For DP 1.4 we need to read those from 2002h range.
      */
-    return core_link_read_dpcd(
-        link,
-        DP_SINK_COUNT,
-        irq_data->raw,
-        sizeof(union hpd_irq_data));
+    if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+        retval = core_link_read_dpcd(
+            link,
+            DP_SINK_COUNT,
+            irq_data->raw,
+            sizeof(union hpd_irq_data));
+    else {
+        /* Read 2 bytes at this location,... */
+        retval = core_link_read_dpcd(
+            link,
+            DP_SINK_COUNT_ESI,
+            irq_data->raw,
+            2);
+
+        if (retval != DC_OK)
+            return retval;
+
+        /* ... then read remaining 4 at the other location */
+        retval = core_link_read_dpcd(
+            link,
+            DP_LANE0_1_STATUS_ESI,
+            &irq_data->raw[2],
+            4);
+    }
+
+    return retval;
 }

 static bool allow_hpd_rx_irq(const struct dc_link *link)
@@ -2278,7 +2303,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,

 static bool retrieve_link_cap(struct dc_link *link)
 {
-    uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+    uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];

     union down_stream_port_count down_strm_port_count;
     union edp_configuration_cap edp_config_cap;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 0a6d483dc046..c0e813c7ddd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -72,7 +72,8 @@ static void dce110_update_generic_info_packet(
     uint32_t max_retries = 50;

     /*we need turn on clock before programming AFMT block*/
-    REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+    if (REG(AFMT_CNTL))
+        REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);

     if (REG(AFMT_VBI_PACKET_CONTROL1)) {
         if (packet_index >= 8)
@@ -719,7 +720,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
         const uint32_t *content =
             (const uint32_t *) &info_frame->avi.sb[0];
         /*we need turn on clock before programming AFMT block*/
-        REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+        if (REG(AFMT_CNTL))
+            REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);

         REG_WRITE(AFMT_AVI_INFO0, content[0]);

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 9150d2694450..e2994d337044 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -121,10 +121,10 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
         frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);


-        for (retry = 100; retry > 0; retry--) {
+        for (retry = 10000; retry > 0; retry--) {
             if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
                 break;
-            msleep(1);
+            udelay(10);
         }
         if (!retry)
             dm_error("Frame count did not increase for 100ms.\n");
@@ -147,14 +147,14 @@ static void wait_for_fbc_state_changed(
     uint32_t addr = mmFBC_STATUS;
     uint32_t value;

-    while (counter < 10) {
+    while (counter < 1000) {
         value = dm_read_reg(cp110->base.ctx, addr);
         if (get_reg_field_value(
             value,
             FBC_STATUS,
             FBC_ENABLE_STATUS) == enabled)
             break;
-        msleep(10);
+        udelay(100);
         counter++;
     }

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index a92fb0aa2ff3..c29052b6da5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1004,9 +1004,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
         /*don't free audio if it is from retrain or internal disable stream*/
         if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
             /*we have to dynamic arbitrate the audio endpoints*/
-            pipe_ctx->stream_res.audio = NULL;
             /*we free the resource, need reset is_audio_acquired*/
             update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+            pipe_ctx->stream_res.audio = NULL;
         }

         /* TODO: notify audio driver for if audio modes list changed
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 46a35c7f01df..c69fa4bfab0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -132,8 +132,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)

 #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))

-
-bool dpp_get_optimal_number_of_taps(
+static bool dpp_get_optimal_number_of_taps(
     struct dpp *dpp,
     struct scaler_data *scl_data,
     const struct scaling_taps *in_taps)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 5944a3ba0409..e862cafa6501 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1424,12 +1424,8 @@ void dpp1_set_degamma(
         enum ipp_degamma_mode mode);

 void dpp1_set_degamma_pwl(struct dpp *dpp_base,
                           const struct pwl_params *params);

-bool dpp_get_optimal_number_of_taps(
-        struct dpp *dpp,
-        struct scaler_data *scl_data,
-        const struct scaling_taps *in_taps);

 void dpp_read_state(struct dpp *dpp_base,
                     struct dcn_dpp_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 4ddd6273d5a5..f862fd148cca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -565,16 +565,16 @@ static void dpp1_dscl_set_manual_ratio_init(
     uint32_t init_int = 0;

     REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
-            SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
+            SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5);

     REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
-            SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
+            SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5);

     REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
-            SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
+            SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5);

     REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
-            SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
+            SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5);

     /*
      * 0.24 format for fraction, first five bits zeroed
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index d2ab78b35a7a..c28085be39ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -396,11 +396,15 @@ bool hubp1_program_surface_flip_and_addr(
         if (address->grph_stereo.right_addr.quad_part == 0)
             break;

-        REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+        REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
                 PRIMARY_SURFACE_TMZ, address->tmz_surface,
                 PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
                 PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
-                PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+                PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
+                SECONDARY_SURFACE_TMZ, address->tmz_surface,
+                SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
+                SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
+                SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);

         if (address->grph_stereo.right_meta_addr.quad_part != 0) {

@@ -459,9 +463,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
     uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
     struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);

-    REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+    REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
             PRIMARY_SURFACE_DCC_EN, dcc_en,
-            PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+            PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
+            SECONDARY_SURFACE_DCC_EN, dcc_en,
+            SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
 }

 void hubp1_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index af384034398f..d901d5092969 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | |||
@@ -312,6 +312,12 @@ | |||
312 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\ | 312 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\ |
313 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\ | 313 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\ |
314 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ | 314 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ |
315 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\ | ||
316 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\ | ||
317 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\ | ||
318 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\ | ||
319 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ | ||
320 | HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ | ||
315 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ | 321 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ |
316 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ | 322 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ |
317 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ | 323 | HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ |
@@ -489,6 +495,8 @@ | |||
489 | type SECONDARY_META_SURFACE_TMZ_C;\ | 495 | type SECONDARY_META_SURFACE_TMZ_C;\ |
490 | type PRIMARY_SURFACE_DCC_EN;\ | 496 | type PRIMARY_SURFACE_DCC_EN;\ |
491 | type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ | 497 | type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ |
498 | type SECONDARY_SURFACE_DCC_EN;\ | ||
499 | type SECONDARY_SURFACE_DCC_IND_64B_BLK;\ | ||
492 | type DET_BUF_PLANE1_BASE_ADDRESS;\ | 500 | type DET_BUF_PLANE1_BASE_ADDRESS;\ |
493 | type CROSSBAR_SRC_CB_B;\ | 501 | type CROSSBAR_SRC_CB_B;\ |
494 | type CROSSBAR_SRC_CR_R;\ | 502 | type CROSSBAR_SRC_CR_R;\ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 653b7b2efe2e..c928ee4cd382 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | |||
@@ -319,6 +319,10 @@ void enc1_stream_encoder_dp_set_stream_attribute( | |||
319 | REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, | 319 | REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, |
320 | DP_COMPONENT_PIXEL_DEPTH_12BPC); | 320 | DP_COMPONENT_PIXEL_DEPTH_12BPC); |
321 | break; | 321 | break; |
322 | case COLOR_DEPTH_161616: | ||
323 | REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, | ||
324 | DP_COMPONENT_PIXEL_DEPTH_16BPC); | ||
325 | break; | ||
322 | default: | 326 | default: |
323 | REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, | 327 | REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, |
324 | DP_COMPONENT_PIXEL_DEPTH_6BPC); | 328 | DP_COMPONENT_PIXEL_DEPTH_6BPC); |
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index bb0d4ebba9f0..a981b3e99ab3 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h | |||
@@ -496,6 +496,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg) | |||
496 | * fractional | 496 | * fractional |
497 | */ | 497 | */ |
498 | 498 | ||
499 | unsigned int dc_fixpt_u3d19(struct fixed31_32 arg); | ||
500 | |||
499 | unsigned int dc_fixpt_u2d19(struct fixed31_32 arg); | 501 | unsigned int dc_fixpt_u2d19(struct fixed31_32 arg); |
500 | 502 | ||
501 | unsigned int dc_fixpt_u0d19(struct fixed31_32 arg); | 503 | unsigned int dc_fixpt_u0d19(struct fixed31_32 arg); |
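The hunk above only declares dc_fixpt_u3d19() alongside the existing dc_fixpt_u2d19(); its definition is not part of this excerpt. A minimal standalone sketch of what such a converter could look like, assuming it mirrors the existing uX.d19 helper family (keep 3 integer bits and the top 19 fractional bits of the 31.32 value); the fixpt_ux_dy name below is an assumption, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

struct fixed31_32 { int64_t value; };	/* signed 31.32 fixed point */

/* Keep integer_bits.fractional_bits of a 31.32 value, dropping the rest. */
static unsigned int fixpt_ux_dy(int64_t value, unsigned int integer_bits,
				unsigned int fractional_bits)
{
	uint32_t shifted = (uint32_t)(value >> (32 - fractional_bits));
	uint32_t mask = (1u << (integer_bits + fractional_bits)) - 1;

	return shifted & mask;
}

static unsigned int dc_fixpt_u3d19_sketch(struct fixed31_32 arg)
{
	return fixpt_ux_dy(arg.value, 3, 19);	/* 3 integer, 19 fractional bits */
}

int main(void)
{
	struct fixed31_32 one = { (int64_t)1 << 32 };	/* 1.0 in 31.32 */

	printf("0x%x\n", dc_fixpt_u3d19_sketch(one));	/* 0x80000 == 1 << 19 */
	return 0;
}

The switch in dcn10_dpp_dscl.c from the 2.19 to the 3.19 format gives the scale-ratio register fields one more integer bit, presumably so scale ratios of 4.0 and above no longer wrap.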
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index 88f7c69df6b9..06fac509e987 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | |||
@@ -36,13 +36,13 @@ | |||
36 | /* DF_CS_AON0_DramBaseAddress0 */ | 36 | /* DF_CS_AON0_DramBaseAddress0 */ |
37 | #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 | 37 | #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 |
38 | #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 | 38 | #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 |
39 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 | 39 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x2 |
40 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 | 40 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x9 |
41 | #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc | 41 | #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc |
42 | #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L | 42 | #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L |
43 | #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L | 43 | #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L |
44 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L | 44 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x0000003CL |
45 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L | 45 | #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L |
46 | #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L | 46 | #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L |
47 | 47 | ||
48 | #endif | 48 | #endif |
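The corrected IntLvNumChan and IntLvAddrSel definitions can be sanity-checked against each other: each mask should be a contiguous field starting exactly at its shift. A throwaway check, for illustration only and not part of the patch:

#include <assert.h>
#include <stdint.h>

#define FIELD_MASK(shift, width) ((((uint32_t)1 << (width)) - 1) << (shift))

int main(void)
{
	assert(FIELD_MASK(0x2, 4) == 0x0000003CUL);	/* IntLvNumChan: 4 bits at bit 2 */
	assert(FIELD_MASK(0x9, 3) == 0x00000E00UL);	/* IntLvAddrSel: 3 bits at bit 9 */
	return 0;
}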
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index c6c1666ac120..092d800b703a 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h | |||
@@ -2026,17 +2026,15 @@ enum atom_smu11_syspll_id { | |||
2026 | SMU11_SYSPLL3_1_ID = 6, | 2026 | SMU11_SYSPLL3_1_ID = 6, |
2027 | }; | 2027 | }; |
2028 | 2028 | ||
2029 | |||
2030 | enum atom_smu11_syspll0_clock_id { | 2029 | enum atom_smu11_syspll0_clock_id { |
2031 | SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK | 2030 | SMU11_SYSPLL0_ECLK_ID = 0, // ECLK |
2032 | SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK | 2031 | SMU11_SYSPLL0_SOCCLK_ID = 1, // SOCCLK |
2033 | SMU11_SYSPLL0_DCLK_ID = 2, // DCLK | 2032 | SMU11_SYSPLL0_MP0CLK_ID = 2, // MP0CLK |
2034 | SMU11_SYSPLL0_VCLK_ID = 3, // VCLK | 2033 | SMU11_SYSPLL0_DCLK_ID = 3, // DCLK |
2035 | SMU11_SYSPLL0_ECLK_ID = 4, // ECLK | 2034 | SMU11_SYSPLL0_VCLK_ID = 4, // VCLK |
2036 | SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK | 2035 | SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK |
2037 | }; | 2036 | }; |
2038 | 2037 | ||
2039 | |||
2040 | enum atom_smu11_syspll1_0_clock_id { | 2038 | enum atom_smu11_syspll1_0_clock_id { |
2041 | SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a | 2039 | SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a |
2042 | }; | 2040 | }; |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index b493369e6d0f..d567be49c31b 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
@@ -180,7 +180,6 @@ static int pp_late_init(void *handle) | |||
180 | { | 180 | { |
181 | struct amdgpu_device *adev = handle; | 181 | struct amdgpu_device *adev = handle; |
182 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; | 182 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; |
183 | int ret; | ||
184 | 183 | ||
185 | if (hwmgr && hwmgr->pm_en) { | 184 | if (hwmgr && hwmgr->pm_en) { |
186 | mutex_lock(&hwmgr->smu_lock); | 185 | mutex_lock(&hwmgr->smu_lock); |
@@ -191,13 +190,6 @@ static int pp_late_init(void *handle) | |||
191 | if (adev->pm.smu_prv_buffer_size != 0) | 190 | if (adev->pm.smu_prv_buffer_size != 0) |
192 | pp_reserve_vram_for_smu(adev); | 191 | pp_reserve_vram_for_smu(adev); |
193 | 192 | ||
194 | if (hwmgr->hwmgr_func->gfx_off_control && | ||
195 | (hwmgr->feature_mask & PP_GFXOFF_MASK)) { | ||
196 | ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true); | ||
197 | if (ret) | ||
198 | pr_err("gfx off enabling failed!\n"); | ||
199 | } | ||
200 | |||
201 | return 0; | 193 | return 0; |
202 | } | 194 | } |
203 | 195 | ||
@@ -245,7 +237,7 @@ static int pp_set_powergating_state(void *handle, | |||
245 | } | 237 | } |
246 | 238 | ||
247 | if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { | 239 | if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { |
248 | pr_info("%s was not implemented.\n", __func__); | 240 | pr_debug("%s was not implemented.\n", __func__); |
249 | return 0; | 241 | return 0; |
250 | } | 242 | } |
251 | 243 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 0af13c154328..323990b77ead 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | |||
@@ -265,19 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, | |||
265 | if (skip) | 265 | if (skip) |
266 | return 0; | 266 | return 0; |
267 | 267 | ||
268 | if (!hwmgr->ps) | ||
269 | /* | ||
270 | * for vega12/vega20 which does not support power state manager | ||
271 | * DAL clock limits should also be honoured | ||
272 | */ | ||
273 | phm_apply_clock_adjust_rules(hwmgr); | ||
274 | |||
275 | phm_pre_display_configuration_changed(hwmgr); | 268 | phm_pre_display_configuration_changed(hwmgr); |
276 | 269 | ||
277 | phm_display_configuration_changed(hwmgr); | 270 | phm_display_configuration_changed(hwmgr); |
278 | 271 | ||
279 | if (hwmgr->ps) | 272 | if (hwmgr->ps) |
280 | power_state_management(hwmgr, new_ps); | 273 | power_state_management(hwmgr, new_ps); |
274 | else | ||
275 | /* | ||
276 | * for vega12/vega20 which does not support power state manager | ||
277 | * DAL clock limits should also be honoured | ||
278 | */ | ||
279 | phm_apply_clock_adjust_rules(hwmgr); | ||
281 | 280 | ||
282 | phm_notify_smc_display_config_after_ps_adjustment(hwmgr); | 281 | phm_notify_smc_display_config_after_ps_adjustment(hwmgr); |
283 | 282 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index c97b0e5ba43b..5325661fedff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | |||
@@ -496,7 +496,9 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI | |||
496 | uint32_t ix; | 496 | uint32_t ix; |
497 | 497 | ||
498 | parameters.clk_id = id; | 498 | parameters.clk_id = id; |
499 | parameters.syspll_id = 0; | ||
499 | parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; | 500 | parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; |
501 | parameters.dfsdid = 0; | ||
500 | 502 | ||
501 | ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); | 503 | ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); |
502 | 504 | ||
@@ -505,7 +507,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI | |||
505 | return -EINVAL; | 507 | return -EINVAL; |
506 | 508 | ||
507 | output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; | 509 | output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; |
508 | *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000; | 510 | *frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; |
509 | 511 | ||
510 | return 0; | 512 | return 0; |
511 | } | 513 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index f0d48b183d22..35bd9870ab10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | |||
@@ -870,12 +870,6 @@ static int init_over_drive_limits( | |||
870 | hwmgr->platform_descriptor.maxOverdriveVDDC = 0; | 870 | hwmgr->platform_descriptor.maxOverdriveVDDC = 0; |
871 | hwmgr->platform_descriptor.overdriveVDDCStep = 0; | 871 | hwmgr->platform_descriptor.overdriveVDDCStep = 0; |
872 | 872 | ||
873 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \ | ||
874 | || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { | ||
875 | hwmgr->od_enabled = false; | ||
876 | pr_debug("OverDrive feature not support by VBIOS\n"); | ||
877 | } | ||
878 | |||
879 | return 0; | 873 | return 0; |
880 | } | 874 | } |
881 | 875 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index ce64dfabd34b..925e17104f90 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | |||
@@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, | |||
1074 | powerplay_table, | 1074 | powerplay_table, |
1075 | (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); | 1075 | (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); |
1076 | 1076 | ||
1077 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 | ||
1078 | && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { | ||
1079 | hwmgr->od_enabled = false; | ||
1080 | pr_debug("OverDrive feature not support by VBIOS\n"); | ||
1081 | } | ||
1082 | |||
1083 | return result; | 1077 | return result; |
1084 | } | 1078 | } |
1085 | 1079 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 85f84f4d8be5..d4bc83e81389 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | |||
@@ -53,8 +53,37 @@ static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic; | |||
53 | 53 | ||
54 | 54 | ||
55 | static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, | 55 | static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, |
56 | struct pp_display_clock_request *clock_req); | 56 | struct pp_display_clock_request *clock_req) |
57 | { | ||
58 | struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); | ||
59 | enum amd_pp_clock_type clk_type = clock_req->clock_type; | ||
60 | uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; | ||
61 | PPSMC_Msg msg; | ||
57 | 62 | ||
63 | switch (clk_type) { | ||
64 | case amd_pp_dcf_clock: | ||
65 | if (clk_freq == smu10_data->dcf_actual_hard_min_freq) | ||
66 | return 0; | ||
67 | msg = PPSMC_MSG_SetHardMinDcefclkByFreq; | ||
68 | smu10_data->dcf_actual_hard_min_freq = clk_freq; | ||
69 | break; | ||
70 | case amd_pp_soc_clock: | ||
71 | msg = PPSMC_MSG_SetHardMinSocclkByFreq; | ||
72 | break; | ||
73 | case amd_pp_f_clock: | ||
74 | if (clk_freq == smu10_data->f_actual_hard_min_freq) | ||
75 | return 0; | ||
76 | smu10_data->f_actual_hard_min_freq = clk_freq; | ||
77 | msg = PPSMC_MSG_SetHardMinFclkByFreq; | ||
78 | break; | ||
79 | default: | ||
80 | pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); | ||
81 | return -EINVAL; | ||
82 | } | ||
83 | smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
58 | 87 | ||
59 | static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps) | 88 | static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps) |
60 | { | 89 | { |
@@ -284,7 +313,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) | |||
284 | 313 | ||
285 | static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) | 314 | static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) |
286 | { | 315 | { |
287 | return smu10_disable_gfx_off(hwmgr); | 316 | return 0; |
288 | } | 317 | } |
289 | 318 | ||
290 | static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) | 319 | static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) |
@@ -299,7 +328,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) | |||
299 | 328 | ||
300 | static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | 329 | static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
301 | { | 330 | { |
302 | return smu10_enable_gfx_off(hwmgr); | 331 | return 0; |
303 | } | 332 | } |
304 | 333 | ||
305 | static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) | 334 | static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) |
@@ -1000,6 +1029,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, | |||
1000 | case amd_pp_soc_clock: | 1029 | case amd_pp_soc_clock: |
1001 | pclk_vol_table = pinfo->vdd_dep_on_socclk; | 1030 | pclk_vol_table = pinfo->vdd_dep_on_socclk; |
1002 | break; | 1031 | break; |
1032 | case amd_pp_disp_clock: | ||
1033 | pclk_vol_table = pinfo->vdd_dep_on_dispclk; | ||
1034 | break; | ||
1035 | case amd_pp_phy_clock: | ||
1036 | pclk_vol_table = pinfo->vdd_dep_on_phyclk; | ||
1037 | break; | ||
1003 | default: | 1038 | default: |
1004 | return -EINVAL; | 1039 | return -EINVAL; |
1005 | } | 1040 | } |
@@ -1017,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, | |||
1017 | return 0; | 1052 | return 0; |
1018 | } | 1053 | } |
1019 | 1054 | ||
1020 | static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, | ||
1021 | struct pp_display_clock_request *clock_req) | ||
1022 | { | ||
1023 | struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); | ||
1024 | enum amd_pp_clock_type clk_type = clock_req->clock_type; | ||
1025 | uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; | ||
1026 | PPSMC_Msg msg; | ||
1027 | 1055 | ||
1028 | switch (clk_type) { | ||
1029 | case amd_pp_dcf_clock: | ||
1030 | if (clk_freq == smu10_data->dcf_actual_hard_min_freq) | ||
1031 | return 0; | ||
1032 | msg = PPSMC_MSG_SetHardMinDcefclkByFreq; | ||
1033 | smu10_data->dcf_actual_hard_min_freq = clk_freq; | ||
1034 | break; | ||
1035 | case amd_pp_soc_clock: | ||
1036 | msg = PPSMC_MSG_SetHardMinSocclkByFreq; | ||
1037 | break; | ||
1038 | case amd_pp_f_clock: | ||
1039 | if (clk_freq == smu10_data->f_actual_hard_min_freq) | ||
1040 | return 0; | ||
1041 | smu10_data->f_actual_hard_min_freq = clk_freq; | ||
1042 | msg = PPSMC_MSG_SetHardMinFclkByFreq; | ||
1043 | break; | ||
1044 | default: | ||
1045 | pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); | ||
1046 | return -EINVAL; | ||
1047 | } | ||
1048 | |||
1049 | smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | 1056 | ||
1054 | static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) | 1057 | static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) |
1055 | { | 1058 | { |
@@ -1182,6 +1185,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { | |||
1182 | .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, | 1185 | .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, |
1183 | .smus_notify_pwe = smu10_smus_notify_pwe, | 1186 | .smus_notify_pwe = smu10_smus_notify_pwe, |
1184 | .gfx_off_control = smu10_gfx_off_control, | 1187 | .gfx_off_control = smu10_gfx_off_control, |
1188 | .display_clock_voltage_request = smu10_display_clock_voltage_request, | ||
1185 | }; | 1189 | }; |
1186 | 1190 | ||
1187 | int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) | 1191 | int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 45e9b8cb169d..f8e866ceda02 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -791,7 +791,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) | |||
791 | data->dpm_table.sclk_table.count++; | 791 | data->dpm_table.sclk_table.count++; |
792 | } | 792 | } |
793 | } | 793 | } |
794 | 794 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) | |
795 | hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk; | ||
795 | /* Initialize Mclk DPM table based on allow Mclk values */ | 796 | /* Initialize Mclk DPM table based on allow Mclk values */ |
796 | data->dpm_table.mclk_table.count = 0; | 797 | data->dpm_table.mclk_table.count = 0; |
797 | for (i = 0; i < dep_mclk_table->count; i++) { | 798 | for (i = 0; i < dep_mclk_table->count; i++) { |
@@ -806,6 +807,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) | |||
806 | } | 807 | } |
807 | } | 808 | } |
808 | 809 | ||
810 | if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) | ||
811 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk; | ||
809 | return 0; | 812 | return 0; |
810 | } | 813 | } |
811 | 814 | ||
@@ -3752,14 +3755,17 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, | |||
3752 | static int smu7_generate_dpm_level_enable_mask( | 3755 | static int smu7_generate_dpm_level_enable_mask( |
3753 | struct pp_hwmgr *hwmgr, const void *input) | 3756 | struct pp_hwmgr *hwmgr, const void *input) |
3754 | { | 3757 | { |
3755 | int result; | 3758 | int result = 0; |
3756 | const struct phm_set_power_state_input *states = | 3759 | const struct phm_set_power_state_input *states = |
3757 | (const struct phm_set_power_state_input *)input; | 3760 | (const struct phm_set_power_state_input *)input; |
3758 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3761 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3759 | const struct smu7_power_state *smu7_ps = | 3762 | const struct smu7_power_state *smu7_ps = |
3760 | cast_const_phw_smu7_power_state(states->pnew_state); | 3763 | cast_const_phw_smu7_power_state(states->pnew_state); |
3761 | 3764 | ||
3762 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); | 3765 | /*skip the trim if od is enabled*/ |
3766 | if (!hwmgr->od_enabled) | ||
3767 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); | ||
3768 | |||
3763 | if (result) | 3769 | if (result) |
3764 | return result; | 3770 | return result; |
3765 | 3771 | ||
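The two added hunks above fall back to the highest entry of the SCLK/MCLK dependency table when the VBIOS reports an OverDrive limit of 0, which pairs with the removal of the od_enabled = false checks elsewhere in this series. A reduced sketch of that fallback, with hypothetical structure names standing in for the powerplay tables:

#include <stdint.h>
#include <stdio.h>

struct clk_entry { uint32_t clk; };
struct clk_dep_table { uint32_t count; struct clk_entry entries[4]; };

/* Use the VBIOS OverDrive limit if it is set, otherwise the top DPM clock. */
static uint32_t od_limit_or_top_clk(uint32_t vbios_od_limit,
				    const struct clk_dep_table *dep)
{
	return vbios_od_limit ? vbios_od_limit
			      : dep->entries[dep->count - 1].clk;
}

int main(void)
{
	struct clk_dep_table sclk = { 3, { { 30000 }, { 60000 }, { 113000 } } };

	printf("%u\n", od_limit_or_top_clk(0, &sclk));	/* prints 113000 */
	return 0;
}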
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d156b7bb92ae..05e680d55dbb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
@@ -321,8 +321,12 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) | |||
321 | odn_table->min_vddc = dep_table[0]->entries[0].vddc; | 321 | odn_table->min_vddc = dep_table[0]->entries[0].vddc; |
322 | 322 | ||
323 | i = od_table[2]->count - 1; | 323 | i = od_table[2]->count - 1; |
324 | od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock; | 324 | od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ? |
325 | od_table[2]->entries[i].vddc = odn_table->max_vddc; | 325 | hwmgr->platform_descriptor.overdriveLimit.memoryClock : |
326 | od_table[2]->entries[i].clk; | ||
327 | od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ? | ||
328 | odn_table->max_vddc : | ||
329 | od_table[2]->entries[i].vddc; | ||
326 | 330 | ||
327 | return 0; | 331 | return 0; |
328 | } | 332 | } |
@@ -1311,6 +1315,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
1311 | vega10_setup_default_single_dpm_table(hwmgr, | 1315 | vega10_setup_default_single_dpm_table(hwmgr, |
1312 | dpm_table, | 1316 | dpm_table, |
1313 | dep_gfx_table); | 1317 | dep_gfx_table); |
1318 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) | ||
1319 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | ||
1320 | dpm_table->dpm_levels[dpm_table->count-1].value; | ||
1314 | vega10_init_dpm_state(&(dpm_table->dpm_state)); | 1321 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1315 | 1322 | ||
1316 | /* Initialize Mclk DPM table based on allow Mclk values */ | 1323 | /* Initialize Mclk DPM table based on allow Mclk values */ |
@@ -1319,6 +1326,10 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
1319 | vega10_setup_default_single_dpm_table(hwmgr, | 1326 | vega10_setup_default_single_dpm_table(hwmgr, |
1320 | dpm_table, | 1327 | dpm_table, |
1321 | dep_mclk_table); | 1328 | dep_mclk_table); |
1329 | if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) | ||
1330 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = | ||
1331 | dpm_table->dpm_levels[dpm_table->count-1].value; | ||
1332 | |||
1322 | vega10_init_dpm_state(&(dpm_table->dpm_state)); | 1333 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1323 | 1334 | ||
1324 | data->dpm_table.eclk_table.count = 0; | 1335 | data->dpm_table.eclk_table.count = 0; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index a9efd8554fbc..dbe4b1f66784 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | |||
@@ -1104,7 +1104,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) | |||
1104 | for (count = 0; count < num_se; count++) { | 1104 | for (count = 0; count < num_se; count++) { |
1105 | data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); | 1105 | data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); |
1106 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); | 1106 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); |
1107 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); | 1107 | result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); |
1108 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); | 1108 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); |
1109 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); | 1109 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); |
1110 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); | 1110 | result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 0768d259c07c..16b1a9cf6cf0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | |||
@@ -267,12 +267,6 @@ static int init_over_drive_limits( | |||
267 | hwmgr->platform_descriptor.maxOverdriveVDDC = 0; | 267 | hwmgr->platform_descriptor.maxOverdriveVDDC = 0; |
268 | hwmgr->platform_descriptor.overdriveVDDCStep = 0; | 268 | hwmgr->platform_descriptor.overdriveVDDCStep = 0; |
269 | 269 | ||
270 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 || | ||
271 | hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { | ||
272 | hwmgr->od_enabled = false; | ||
273 | pr_debug("OverDrive feature not support by VBIOS\n"); | ||
274 | } | ||
275 | |||
276 | return 0; | 270 | return 0; |
277 | } | 271 | } |
278 | 272 | ||
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index df1578d6f42e..44d480768dfe 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c | |||
@@ -349,8 +349,13 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) | |||
349 | struct dma_fence * fence = entity->dependency; | 349 | struct dma_fence * fence = entity->dependency; |
350 | struct drm_sched_fence *s_fence; | 350 | struct drm_sched_fence *s_fence; |
351 | 351 | ||
352 | if (fence->context == entity->fence_context) { | 352 | if (fence->context == entity->fence_context || |
353 | /* We can ignore fences from ourself */ | 353 | fence->context == entity->fence_context + 1) { |
354 | /* | ||
355 | * Fence is a scheduled/finished fence from a job | ||
356 | * which belongs to the same entity, we can ignore | ||
357 | * fences from ourself | ||
358 | */ | ||
354 | dma_fence_put(entity->dependency); | 359 | dma_fence_put(entity->dependency); |
355 | return false; | 360 | return false; |
356 | } | 361 | } |
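The widened check relies on the scheduler fence layout: each entity's jobs carry a scheduled fence and a finished fence on two consecutive fence contexts, so both context values identify fences the entity itself produced. A self-contained restatement of the test; the struct and function names here are illustrative, not the DRM API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_entity { uint64_t fence_context; };	/* base of two consecutive contexts */

static bool fence_is_own(const struct toy_entity *e, uint64_t fence_context)
{
	return fence_context == e->fence_context ||	/* scheduled fence */
	       fence_context == e->fence_context + 1;	/* finished fence */
}

int main(void)
{
	struct toy_entity e = { .fence_context = 100 };

	printf("%d %d %d\n", fence_is_own(&e, 100), fence_is_own(&e, 101),
	       fence_is_own(&e, 102));	/* prints 1 1 0 */
	return 0;
}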