author     Dave Airlie <airlied@redhat.com>    2018-01-11 21:16:58 -0500
committer  Dave Airlie <airlied@redhat.com>    2018-01-11 21:16:58 -0500
commit     8563188e37b000979ab66521f4337df9a3453223 (patch)
tree       59b25d1374cb7b230c410cfe281b929aab2fe292 /drivers/gpu/drm/amd/amdgpu
parent     9be712ef4612268c28b9f1e2d850d3ceab06ef66 (diff)
parent     ad8cec7df5d4bf3b1109fabbb1d61663857045ae (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few fixes for 4.16:
- Clean up the remains of ttm io_mem_pfn
- A couple dpm quirks for SI
- Add Chunming as another amdgpu maintainer
- A few more huge page fixes
- A few other misc fixes
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux:
drm/amd/pp: Implement get_max_high_clocks for CI/VI
MAINTAINERS: add David (Chunming) Zhou as additional amdgpu maintainer
drm/amdgpu: fix 64bit BAR detection
drm/amdgpu: optimize moved handling only when vm_debug is inactive
drm/amdgpu: simplify huge page handling
drm/amdgpu: update VM PDs after the PTs
drm/amdgpu: minor optimize VM moved handling v2
drm/amdgpu: loosen the criteria for huge pages a bit
drm/amd/powerplay: set pp_num_states as 0 on error situation
drm/ttm: specify DMA_ATTR_NO_WARN for huge page pools
drm/ttm: remove ttm_bo_default_io_mem_pfn
staging: remove the default io_mem_pfn set
drm/amd/powerplay: fix memory leakage when reload (v2)
drm/amdgpu/gfx9: only init the apertures used by KGD (v2)
drm/amdgpu: add atpx quirk handling (v2)
drm/amdgpu: Add dpm quirk for Jet PRO (v2)
drm/radeon: Add dpm quirk for Jet PRO (v2)
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c           |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c       |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c          |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c           | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c            |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c              |  5
7 files changed, 97 insertions, 63 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..e2c3c5ec42d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -14,6 +14,16 @@
 
 #include "amd_acpi.h"
 
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 px_quirk_flags;
+};
+
 struct amdgpu_atpx_functions {
 	bool px_params;
 	bool power_cntl;
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
 static struct amdgpu_atpx_priv {
 	bool atpx_detected;
 	bool bridge_pm_usable;
+	unsigned int quirks;
 	/* handle for device - and atpx */
 	acpi_handle dhandle;
 	acpi_handle other_handle;
@@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
-		printk("ATPX Hybrid Graphics\n");
-		/*
-		 * Disable legacy PM methods only when pcie port PM is usable,
-		 * otherwise the device might fail to power off or power on.
-		 */
-		atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
-		atpx->is_hybrid = true;
+		if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+			printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+			atpx->functions.power_cntl = true;
+			atpx->is_hybrid = false;
+		} else {
+			printk("ATPX Hybrid Graphics\n");
+			/*
+			 * Disable legacy PM methods only when pcie port PM is usable,
+			 * otherwise the device might fail to power off or power on.
+			 */
+			atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+			atpx->is_hybrid = true;
+		}
 	}
 
 	atpx->dgpu_req_power_for_displays = false;
@@ -547,6 +564,30 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
 	.get_client_id = amdgpu_atpx_get_client_id,
 };
 
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+	/* HG _PR3 doesn't seem to work on this A+A weston board */
+	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+	const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+	/* Apply PX quirks */
+	while (p && p->chip_device != 0) {
+		if (pdev->vendor == p->chip_vendor &&
+		    pdev->device == p->chip_device &&
+		    pdev->subsystem_vendor == p->subsys_vendor &&
+		    pdev->subsystem_device == p->subsys_device) {
+			amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+			break;
+		}
+		++p;
+	}
+}
+
 /**
  * amdgpu_atpx_detect - detect whether we have PX
  *
@@ -570,6 +611,7 @@ static bool amdgpu_atpx_detect(void)
 
 		parent_pdev = pci_upstream_bridge(pdev);
 		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+		amdgpu_atpx_get_quirks(pdev);
 	}
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
@@ -579,6 +621,7 @@ static bool amdgpu_atpx_detect(void)
 
 		parent_pdev = pci_upstream_bridge(pdev);
 		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+		amdgpu_atpx_get_quirks(pdev);
 	}
 
 	if (has_atpx && vga_count == 2) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5e539fc5b05f..e80fc38141b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -778,10 +778,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	struct amdgpu_bo *bo;
 	int i, r;
 
-	r = amdgpu_vm_update_directories(adev, vm);
-	if (r)
-		return r;
-
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
@@ -839,6 +835,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
+	r = amdgpu_vm_update_directories(adev, vm);
+	if (r)
+		return r;
+
 	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 357cd8bf2e55..9baf182d5418 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -626,7 +626,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	root = root->parent;
 
 	pci_bus_for_each_resource(root, res, i) {
-		if (res && res->flags & IORESOURCE_MEM_64 &&
+		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 		    res->start > 0x100000000ull)
 			break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 10805edcf964..e48b4ec88c8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -518,10 +518,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (!amdgpu_vm_ready(vm))
 		return;
 
-	r = amdgpu_vm_update_directories(adev, vm);
-	if (r)
-		goto error;
-
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		goto error;
@@ -530,6 +526,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	    operation == AMDGPU_VA_OP_REPLACE)
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
+	r = amdgpu_vm_update_directories(adev, vm);
+	if (r)
+		goto error;
+
 error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d4510807a692..cd1752b6afa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,57 +946,38 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 					unsigned nptes, uint64_t dst,
 					uint64_t flags)
 {
-	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
 	uint64_t pd_addr, pde;
 
 	/* In the case of a mixed PT the PDE must point to it*/
-	if (p->adev->asic_type < CHIP_VEGA10 ||
-	    nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
-	    p->src ||
-	    !(flags & AMDGPU_PTE_VALID)) {
-
-		dst = amdgpu_bo_gpu_offset(entry->base.bo);
-		flags = AMDGPU_PTE_VALID;
-	} else {
+	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
 		/* Set the huge page flag to stop scanning at this PDE */
 		flags |= AMDGPU_PDE_PTE;
 	}
 
-	if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+	if (!(flags & AMDGPU_PDE_PTE)) {
+		if (entry->huge) {
+			/* Add the entry to the relocated list to update it. */
+			entry->huge = false;
+			spin_lock(&p->vm->status_lock);
+			list_move(&entry->base.vm_status, &p->vm->relocated);
+			spin_unlock(&p->vm->status_lock);
+		}
 		return;
-	entry->huge = !!(flags & AMDGPU_PDE_PTE);
+	}
 
+	entry->huge = true;
 	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 			       &dst, &flags);
 
-	if (use_cpu_update) {
-		/* In case a huge page is replaced with a system
-		 * memory mapping, p->pages_addr != NULL and
-		 * amdgpu_vm_cpu_set_ptes would try to translate dst
-		 * through amdgpu_vm_map_gart. But dst is already a
-		 * GPU address (of the page table). Disable
-		 * amdgpu_vm_map_gart temporarily.
-		 */
-		dma_addr_t *tmp;
-
-		tmp = p->pages_addr;
-		p->pages_addr = NULL;
-
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-		p->pages_addr = tmp;
-	} else {
-		if (parent->base.bo->shadow) {
-			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-			pde = pd_addr + (entry - parent->entries) * 8;
-			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-		}
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+	if (parent->base.bo->shadow) {
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
 		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+		p->func(p, pde, dst, 1, 0, flags);
 	}
+	pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+	pde = pd_addr + (entry - parent->entries) * 8;
+	p->func(p, pde, dst, 1, 0, flags);
 }
 
 /**
@@ -1208,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	/* one PDE write for each huge page */
-	if (vm->root.base.bo->shadow)
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-	else
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
-
 	if (pages_addr) {
 		/* copy commands needed */
 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1288,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-				   adev->vm_manager.root_level);
 	return r;
 }
 
@@ -1700,18 +1673,31 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->moved)) {
 		struct amdgpu_bo_va *bo_va;
+		struct reservation_object *resv;
 
 		bo_va = list_first_entry(&vm->moved,
 			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
 
+		resv = bo_va->base.bo->tbo.resv;
+
 		/* Per VM BOs never need to bo cleared in the page tables */
-		clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+		if (resv == vm->root.base.bo->tbo.resv)
+			clear = false;
+		/* Try to reserve the BO to avoid clearing its ptes */
+		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+			clear = false;
+		/* Somebody else is using the BO right now */
+		else
+			clear = true;
 
 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
 		if (r)
 			return r;
 
+		if (!clear && resv != vm->root.base.bo->tbo.resv)
+			reservation_object_unlock(resv);
+
 		spin_lock(&vm->status_lock);
 	}
 	spin_unlock(&vm->status_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 55670dbacace..fc270e2ef91a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1526,7 +1526,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
 	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
 		soc15_grbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		if (i == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 299cb3161b2c..ce675a7f179a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+		if ((adev->pdev->revision == 0xC3) ||
+		    (adev->pdev->device == 0x6665)) {
+			max_sclk = 60000;
+			max_mclk = 80000;
+		}
 	} else if (adev->asic_type == CHIP_OLAND) {
 		if ((adev->pdev->revision == 0xC7) ||
 		    (adev->pdev->revision == 0x80) ||